hip_filename: string, length 5–84 characters
hip_content: string, length 79–9.69M characters
cuda_filename: string, length 4–83 characters
cuda_content: string, length 19–9.69M characters
3d0e8304d8197dec12c238b3b7ca625255a541e9.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { const char bessel_y0_name[] = "bessel_y0_forward"; void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() { jitted_gpu_kernel<bessel_y0_name, scalar_t, scalar_t, 1>(iterator, bessel_y0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return bessel_y0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_cuda); } // namespace at::native
3d0e8304d8197dec12c238b3b7ca625255a541e9.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { const char bessel_y0_name[] = "bessel_y0_forward"; void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() { jitted_gpu_kernel<bessel_y0_name, scalar_t, scalar_t, 1>(iterator, bessel_y0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return bessel_y0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_cuda); } // namespace at::native
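Each row of this dataset pairs a hipified source file (hip_content, carrying the "generated by hipify" banner) with the original CUDA source (cuda_content). In the pair above, the only differences are that banner and the rewritten include paths: ATen/native/cuda/... becomes ATen/native/hip/..., and c10/cuda/CUDAMathCompat.h becomes c10/hip/HIPMathCompat.h. Below is a minimal standalone sketch of the same header-and-API rename applied to the plain CUDA runtime; the device-count query is illustrative and not taken from the dataset.

```cuda
#include <cstdio>
#include <cuda_runtime.h>            // hipify output: #include "hip/hip_runtime.h"

int main() {
  int count = 0;
  cudaGetDeviceCount(&count);        // hipify output: hipGetDeviceCount(&count);
  printf("visible devices: %d\n", count);
  return 0;
}
```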
18b781f877d06170d3193a9a295c5304c1c98707.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <math_functions.hpp> #include "common/include/type.h" #include "common/include/common.h" //====================template /* calc the mean by row dimension */ template<typename T> __global__ void mean_by_rows(T *mat_device, T *mean_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; u32 thread_idx = idy*(gridDim.x*blockIdx.x) + idx; if(thread_idx >= cols) return ; T mean = (T)0; T cur_val = (T)0; for (u32 i = 0; i < rows; i++){ cur_val = mat_device[i*cols+thread_idx]; // in case of sum is too big mean = mean*((float)i/(i+1)) + cur_val/(double)(i+1); } mean_vec[thread_idx] = mean; } /* each row subtract the mean vector */ template<typename T> __global__ void zero_mean_by_rows(T *mat_device, T *mean_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < cols && idy < rows){ u32 val_idx = idy*cols + idx; mat_device[val_idx] -= mean_vec[idx]; } } /* calc the std by rows dimension */ template<typename T> __global__ void std_by_rows(T *mat_device, T *mean_vec, T *std_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; u32 thread_idx = idy*(gridDim.x*blockIdx.x) + idx; if (thread_idx >= cols) return ; T cur_val; T std = T(0); for (size_t i = 0; i< rows; i++){ cur_val = mat_device[i*cols+thread_idx]; // in case of std sum is bigger than limits std = std*((float)i/(i+1)) + cur_val*cur_val/(double)(i+1); } std_vec[thread_idx] = sqrt(std); } /* each row divide the std vector */ template<typename T> __global__ void one_std_by_rows(T *mat_device, T *std_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < cols && idy < rows){ u32 val_idx = idy*cols + idx; mat_device[val_idx] /= std_vec[idx]; } } //===========launch /* export the function for be called by host */ void mean_by_rows_launch(float *mat_device, float *mean_device, u32 rows, u32 cols){ const u32 COLS = 256; dim3 grid0( MAX(1,ceil(double(cols)/COLS)) ); dim3 block0(COLS); hipLaunchKernelGGL(( mean_by_rows<float>), dim3(grid0), dim3(block0), 0, 0, mat_device, mean_device, rows, cols); } void subtract_mean_by_rows_launch(float *mat_device, float *mean_device, u32 rows, u32 cols){ mean_by_rows_launch(mat_device, mean_device, rows, cols); const u32 block_size = 32; dim3 block1(block_size, block_size); dim3 grid1(MAX(1, ceil((double)cols/block_size)), MAX(1, ceil((double)rows/block_size))); hipLaunchKernelGGL(( zero_mean_by_rows<float>), dim3(grid1),dim3(block1), 0, 0, mat_device, mean_device, rows, cols); } /* function: normalization_by_rows_launch */ void normalization_by_rows_launch(float *mat_device, float *mean_device, float *std_device, u32 rows, u32 cols){ subtract_mean_by_rows_launch(mat_device, mean_device, rows, cols); const u32 COLS = 256; dim3 grid0( MAX(1, ceil(double(cols)/COLS)) ); dim3 block0(COLS); hipLaunchKernelGGL(( std_by_rows<float>), dim3(grid0), dim3(block0), 0, 0, mat_device, mean_device, std_device, rows, cols); const u32 block_size = 32; dim3 block1(block_size, block_size); dim3 grid1(MAX(1, ceil(double(cols)/block_size)), MAX(1, ceil(double(rows)/block_size))); hipLaunchKernelGGL(( one_std_by_rows<float>), dim3(grid1), dim3(block1), 0, 0, mat_device, std_device, rows, cols); } //===============export to host void 
mean_by_rows_cpu(float *mat_device, float *mean_device, u32 rows, u32 cols){ mean_by_rows_launch(mat_device, mean_device, rows, cols); } void subtract_mean_by_rows_cpu(float *mat_device, float *mean_device, u32 rows, u32 cols){ subtract_mean_by_rows_launch(mat_device, mean_device, rows, cols); } void normalization_by_rows_cpu(float *mat_device, float *mean_device, float *std_device, u32 rows, u32 cols){ normalization_by_rows_launch(mat_device, mean_device, std_device, rows, cols); }
18b781f877d06170d3193a9a295c5304c1c98707.cu
#include <stdio.h> #include <cuda_runtime.h> #include <math_functions.hpp> #include "common/include/type.h" #include "common/include/common.h" //====================template /* calc the mean by row dimension */ template<typename T> __global__ void mean_by_rows(T *mat_device, T *mean_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; u32 thread_idx = idy*(gridDim.x*blockIdx.x) + idx; if(thread_idx >= cols) return ; T mean = (T)0; T cur_val = (T)0; for (u32 i = 0; i < rows; i++){ cur_val = mat_device[i*cols+thread_idx]; // in case of sum is too big mean = mean*((float)i/(i+1)) + cur_val/(double)(i+1); } mean_vec[thread_idx] = mean; } /* each row subtract the mean vector */ template<typename T> __global__ void zero_mean_by_rows(T *mat_device, T *mean_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < cols && idy < rows){ u32 val_idx = idy*cols + idx; mat_device[val_idx] -= mean_vec[idx]; } } /* calc the std by rows dimension */ template<typename T> __global__ void std_by_rows(T *mat_device, T *mean_vec, T *std_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; u32 thread_idx = idy*(gridDim.x*blockIdx.x) + idx; if (thread_idx >= cols) return ; T cur_val; T std = T(0); for (size_t i = 0; i< rows; i++){ cur_val = mat_device[i*cols+thread_idx]; // in case of std sum is bigger than limits std = std*((float)i/(i+1)) + cur_val*cur_val/(double)(i+1); } std_vec[thread_idx] = sqrt(std); } /* each row divide the std vector */ template<typename T> __global__ void one_std_by_rows(T *mat_device, T *std_vec, u32 rows, u32 cols){ u32 idy = blockIdx.y*blockDim.y + threadIdx.y; u32 idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < cols && idy < rows){ u32 val_idx = idy*cols + idx; mat_device[val_idx] /= std_vec[idx]; } } //===========launch /* export the function for be called by host */ void mean_by_rows_launch(float *mat_device, float *mean_device, u32 rows, u32 cols){ const u32 COLS = 256; dim3 grid0( MAX(1,ceil(double(cols)/COLS)) ); dim3 block0(COLS); mean_by_rows<float><<<grid0, block0>>>(mat_device, mean_device, rows, cols); } void subtract_mean_by_rows_launch(float *mat_device, float *mean_device, u32 rows, u32 cols){ mean_by_rows_launch(mat_device, mean_device, rows, cols); const u32 block_size = 32; dim3 block1(block_size, block_size); dim3 grid1(MAX(1, ceil((double)cols/block_size)), MAX(1, ceil((double)rows/block_size))); zero_mean_by_rows<float><<<grid1,block1>>>(mat_device, mean_device, rows, cols); } /* function: normalization_by_rows_launch */ void normalization_by_rows_launch(float *mat_device, float *mean_device, float *std_device, u32 rows, u32 cols){ subtract_mean_by_rows_launch(mat_device, mean_device, rows, cols); const u32 COLS = 256; dim3 grid0( MAX(1, ceil(double(cols)/COLS)) ); dim3 block0(COLS); std_by_rows<float><<<grid0, block0>>>(mat_device, mean_device, std_device, rows, cols); const u32 block_size = 32; dim3 block1(block_size, block_size); dim3 grid1(MAX(1, ceil(double(cols)/block_size)), MAX(1, ceil(double(rows)/block_size))); one_std_by_rows<float><<<grid1, block1>>>(mat_device, std_device, rows, cols); } //===============export to host void mean_by_rows_cpu(float *mat_device, float *mean_device, u32 rows, u32 cols){ mean_by_rows_launch(mat_device, mean_device, rows, cols); } void subtract_mean_by_rows_cpu(float *mat_device, float *mean_device, u32 rows, u32 
cols){ subtract_mean_by_rows_launch(mat_device, mean_device, rows, cols); } void normalization_by_rows_cpu(float *mat_device, float *mean_device, float *std_device, u32 rows, u32 cols){ normalization_by_rows_launch(mat_device, mean_device, std_device, rows, cols); }
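The substantive rewrite in the pair above is the kernel-launch syntax: the CUDA triple-chevron launch (e.g. `mean_by_rows<float><<<grid0, block0>>>(...)`) becomes `hipLaunchKernelGGL((mean_by_rows<float>), dim3(grid0), dim3(block0), 0, 0, ...)` in the .hip cell, with shared-memory size and stream made explicit. A minimal standalone sketch of that rewrite, using a hypothetical kernel that is not part of the dataset:

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel, used only to illustrate the launch rewrite.
__global__ void scale(float* v, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= s;
}

int main() {
  const int n = 256;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  dim3 block(128), grid((n + block.x - 1) / block.x);

  // CUDA source form, as in the .cu cell above:
  scale<<<grid, block>>>(d, 2.0f, n);

  // hipify output, as in the .hip cell above (kernel wrapped in parentheses,
  // grid/block wrapped in dim3, shared-memory size and stream made explicit):
  //   hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, 0, d, 2.0f, n);

  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}
```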
a5d315e4c850d4f7eabcd82d565982e887c51dae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; unsigned long long * P_COMP_CURRENT; #include "gen_hip.cuh" static const int __tb_ConnectedComp = TB_SIZE; static const int __tb_FirstItr_ConnectedComp = TB_SIZE; __global__ void InitializeGraph(CSRGraph graph, unsigned int __nowned, unsigned int __begin, unsigned int __end, unsigned long long * p_comp_current) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_comp_current[src] = graph.node_data[src]; } } // FP: "7 -> 8; } __global__ void ConnectedComp(CSRGraph graph, DynamicBitset *is_updated, unsigned int __nowned, unsigned int __begin, unsigned int __end, unsigned long long * p_comp_current, Sum ret_val) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_ConnectedComp; typedef hipcub::BlockReduce<int, TB_SIZE> _br; __shared__ _br::TempStorage _ts; ret_val.thread_entry(); index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, 
index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; bool pop = src < __end; if (pop) { } struct NPInspector1 _np = {0,0,0,0,0,0}; __shared__ struct { index_type src; } _np_closure [TB_SIZE]; _np_closure[threadIdx.x].src = src; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } __syncthreads(); while (true) { if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } __syncthreads(); if (nps.tb.owner == MAX_TB_SIZE + 1) { __syncthreads(); break; } if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } __syncthreads(); int ns = nps.tb.start; int ne = nps.tb.size; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } assert(nps.tb.src < __kernel_tb_size); src = _np_closure[nps.tb.src].src; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type jj; jj = ns +_np_j; { index_type dst; unsigned long long new_comp; unsigned long long old_comp; dst = graph.getAbsDestination(jj); new_comp = p_comp_current[dst]; if (p_comp_current[src] > new_comp) { old_comp = atomicMin(&p_comp_current[src], new_comp); if (old_comp > new_comp) { is_updated->set(src); ret_val.do_return( 1); } } } } __syncthreads(); } { const int warpid = threadIdx.x / 32; const int _np_laneid = cub::LaneId(); while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); src = _np_closure[nps.warp.src[warpid]].src; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type jj; jj = _np_w_start +_np_ii; { index_type dst; unsigned long long new_comp; unsigned long long old_comp; dst = graph.getAbsDestination(jj); new_comp = p_comp_current[dst]; if (p_comp_current[src] > new_comp) { old_comp = atomicMin(&p_comp_current[src], new_comp); if (old_comp > new_comp) { is_updated->set(src); ret_val.do_return( 1); } } } } } __syncthreads(); } __syncthreads(); _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; while (_np.work()) { int _np_i =0; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); __syncthreads(); for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type jj; assert(nps.fg.src[_np_i] < __kernel_tb_size); src = _np_closure[nps.fg.src[_np_i]].src; jj= nps.fg.itvalue[_np_i]; { index_type dst; unsigned long long new_comp; unsigned long long old_comp; dst = graph.getAbsDestination(jj); new_comp = p_comp_current[dst]; if (p_comp_current[src] > new_comp) { old_comp = atomicMin(&p_comp_current[src], new_comp); if (old_comp > new_comp) { ret_val.do_return( 1); is_updated->set(src); } } } } _np.execute_round_done(ITSIZE); __syncthreads(); } assert(threadIdx.x < __kernel_tb_size); src = _np_closure[threadIdx.x].src; } ret_val.thread_exit<_br>(_ts); } void InitializeGraph_cuda(unsigned int 
__begin, unsigned int __end, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, ctx->nowned, __begin, __end, ctx->comp_current.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_all_cuda(struct CUDA_Context * ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->nowned, ctx); // FP: "2 -> 3; } void ConnectedComp_cuda(unsigned int __begin, unsigned int __end, int & __retval, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<int> retval = Shared<int>(1); Sum _rv; *(retval.cpu_wr_ptr()) = 0; _rv.rv = retval.gpu_wr_ptr(); hipLaunchKernelGGL(( ConnectedComp) , dim3(blocks), dim3(__tb_ConnectedComp), 0, 0, ctx->gg, ctx->comp_current.is_updated.gpu_rd_ptr(), ctx->nowned, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), _rv); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; __retval = *(retval.cpu_rd_ptr()); // FP: "7 -> 8; } void ConnectedComp_all_cuda(int & __retval, struct CUDA_Context * ctx) { // FP: "1 -> 2; ConnectedComp_cuda(0, ctx->nowned, __retval, ctx); // FP: "2 -> 3; }
a5d315e4c850d4f7eabcd82d565982e887c51dae.cu
/* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; unsigned long long * P_COMP_CURRENT; #include "gen_cuda.cuh" static const int __tb_ConnectedComp = TB_SIZE; static const int __tb_FirstItr_ConnectedComp = TB_SIZE; __global__ void InitializeGraph(CSRGraph graph, unsigned int __nowned, unsigned int __begin, unsigned int __end, unsigned long long * p_comp_current) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_comp_current[src] = graph.node_data[src]; } } // FP: "7 -> 8; } __global__ void ConnectedComp(CSRGraph graph, DynamicBitset *is_updated, unsigned int __nowned, unsigned int __begin, unsigned int __end, unsigned long long * p_comp_current, Sum ret_val) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_ConnectedComp; typedef cub::BlockReduce<int, TB_SIZE> _br; __shared__ _br::TempStorage _ts; ret_val.thread_entry(); index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; bool pop = src < __end; if (pop) 
{ } struct NPInspector1 _np = {0,0,0,0,0,0}; __shared__ struct { index_type src; } _np_closure [TB_SIZE]; _np_closure[threadIdx.x].src = src; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } __syncthreads(); while (true) { if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } __syncthreads(); if (nps.tb.owner == MAX_TB_SIZE + 1) { __syncthreads(); break; } if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } __syncthreads(); int ns = nps.tb.start; int ne = nps.tb.size; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } assert(nps.tb.src < __kernel_tb_size); src = _np_closure[nps.tb.src].src; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type jj; jj = ns +_np_j; { index_type dst; unsigned long long new_comp; unsigned long long old_comp; dst = graph.getAbsDestination(jj); new_comp = p_comp_current[dst]; if (p_comp_current[src] > new_comp) { old_comp = atomicMin(&p_comp_current[src], new_comp); if (old_comp > new_comp) { is_updated->set(src); ret_val.do_return( 1); } } } } __syncthreads(); } { const int warpid = threadIdx.x / 32; const int _np_laneid = cub::LaneId(); while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); src = _np_closure[nps.warp.src[warpid]].src; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type jj; jj = _np_w_start +_np_ii; { index_type dst; unsigned long long new_comp; unsigned long long old_comp; dst = graph.getAbsDestination(jj); new_comp = p_comp_current[dst]; if (p_comp_current[src] > new_comp) { old_comp = atomicMin(&p_comp_current[src], new_comp); if (old_comp > new_comp) { is_updated->set(src); ret_val.do_return( 1); } } } } } __syncthreads(); } __syncthreads(); _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; while (_np.work()) { int _np_i =0; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); __syncthreads(); for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type jj; assert(nps.fg.src[_np_i] < __kernel_tb_size); src = _np_closure[nps.fg.src[_np_i]].src; jj= nps.fg.itvalue[_np_i]; { index_type dst; unsigned long long new_comp; unsigned long long old_comp; dst = graph.getAbsDestination(jj); new_comp = p_comp_current[dst]; if (p_comp_current[src] > new_comp) { old_comp = atomicMin(&p_comp_current[src], new_comp); if (old_comp > new_comp) { ret_val.do_return( 1); is_updated->set(src); } } } } _np.execute_round_done(ITSIZE); __syncthreads(); } assert(threadIdx.x < __kernel_tb_size); src = _np_closure[threadIdx.x].src; } ret_val.thread_exit<_br>(_ts); } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // 
FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; InitializeGraph <<<blocks, threads>>>(ctx->gg, ctx->nowned, __begin, __end, ctx->comp_current.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_all_cuda(struct CUDA_Context * ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->nowned, ctx); // FP: "2 -> 3; } void ConnectedComp_cuda(unsigned int __begin, unsigned int __end, int & __retval, struct CUDA_Context * ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<int> retval = Shared<int>(1); Sum _rv; *(retval.cpu_wr_ptr()) = 0; _rv.rv = retval.gpu_wr_ptr(); ConnectedComp <<<blocks, __tb_ConnectedComp>>>(ctx->gg, ctx->comp_current.is_updated.gpu_rd_ptr(), ctx->nowned, __begin, __end, ctx->comp_current.data.gpu_wr_ptr(), _rv); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; __retval = *(retval.cpu_rd_ptr()); // FP: "7 -> 8; } void ConnectedComp_all_cuda(int & __retval, struct CUDA_Context * ctx) { // FP: "1 -> 2; ConnectedComp_cuda(0, ctx->nowned, __retval, ctx); // FP: "2 -> 3; }
19eccbc1f0bc67da04e58674bb7511aafc76d828.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * raymarching.cu * * Created on: 31.07.2019 * Author: Megacrafter127 */ #include "raymarching.cuh" #include <cassert> #include <unordered_map> //__constant__ static float4 green={0,1,0,1}; //__constant__ static int3 overlayoffset={0,0,0}; __constant__ static float framedata[2]; //__constant__ static unsigned precisions[2]={0,2}; __device__ static constexpr scalarType collisionDistance(scalarType totalDist,scalarType multiplier) { return multiplier*totalDist*SCL_EPSILON; } __host__ __device__ static constexpr float4 &operator+=(float4 &a, float4 b) { a.x+=b.x; a.y+=b.y; a.z+=b.z; a.w+=b.w; return a; } typedef struct { vectorType pos; scalarType dist,len; } minStore_t; __host__ __device__ inline size_t idx(uint3 i, dim3 b, size_t j) { return i.x+b.x*(i.y+b.y*(i.z+b.z*j)); } __shared__ extern minStore_t mins[]; template<typename T> __device__ inline void simpleSurf2Dwrite(register T data, register hipSurfaceObject_t surface, register int x, register int y, register hipSurfaceBoundaryMode mode = hipBoundaryModeZero) { return surf2Dwrite(data, surface, x*sizeof(T), y, mode); } __device__ static constexpr uint3 operator+(uint3 a, uint3 b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; } __device__ static constexpr uint3 operator*(uint3 a, dim3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; return a; } __device__ static constexpr uint3 operator*(dim3 a, uint3 b) { return b*a; } __device__ static constexpr int3 operator-(uint3 a, dim3 b) { int3 ret{}; ret.x=a.x-b.x; ret.y=a.y-b.y; ret.z=a.z-b.z; return ret; } __device__ static constexpr dim3 operator/(dim3 a, unsigned scalar) { a.x/=scalar; a.y/=scalar; a.z/=scalar; return a; } __global__ static void marchRay(hipSurfaceObject_t surf, dim3 bounds, size_t frame, const void *data) { const register uint3 pos = threadIdx+blockDim*blockIdx; __shared__ world_t world; if((threadIdx.x|threadIdx.y|threadIdx.z)==0) { memcpy(&world,data,sizeof(world_t)); } __syncthreads(); register scalarType totalDist=0._s,divergence; register vectorType start=world.camera.pos; register vectorType ray=world.camera.rays(divergence,pos-bounds/2,frame); ray/=norm(ray); register size_t step=0; for(size_t i=0;i<world.shapeCount;i++) { mins[idx(threadIdx,blockDim,i)].pos=start; mins[idx(threadIdx,blockDim,i)].dist=0._s; mins[idx(threadIdx,blockDim,i)].len=INFINITY; } register int end=0; for(;totalDist*SCL_EPSILON < world.maxErr;step++) { const register scalarType colDist=collisionDistance(totalDist,world.collisionMultiplier); register scalarType minDist=INFINITY; for(size_t i=0;i<world.shapeCount;i++) { const register scalarType dist=world.shapes[i].getDistance(start,frame); if(dist<minDist) minDist=dist; if(dist<colDist) end|=1; if(dist<mins[idx(threadIdx,blockDim,i)].len) { mins[idx(threadIdx,blockDim,i)].len=dist; mins[idx(threadIdx,blockDim,i)].pos=start; mins[idx(threadIdx,blockDim,i)].dist=totalDist; } } totalDist+=minDist; start+=ray*minDist; if(end) { break; } } register float4 color=world.background; for(size_t i=0;i<world.shapeCount;i++) { const register scalarType distance=world.shapes[i].getDistance(start,frame); register float4 c=world.shapes[i].getColor(frame, distance<collisionDistance(totalDist,world.collisionMultiplier), start,totalDist,distance,step, mins[idx(threadIdx,blockDim,i)].len, world.shapes[i].getDistance(mins[idx(threadIdx,blockDim,i)].pos,frame), mins[idx(threadIdx,blockDim,i)].dist); c.x*=c.w; c.y*=c.w; c.z*=c.w; color+=c; } if(color.w>0 || color.w<0) { 
color.x/=color.w; color.y/=color.w; color.z/=color.w; color.w=1; } else { color.x=0; color.y=0; color.z=0; } simpleSurf2Dwrite(color,surf,pos.x,pos.y); //return overlayNumbers(color,pos,green,overlayoffset,2,precisions,framedata,2); } #define CHUNKIFY(len,chunk) (len+(chunk-len%chunk)%chunk) #define CHUNKCOUNT(len,chunk) (CHUNKIFY(len,chunk)/chunk) constexpr dim3 operator+(dim3 a, dim3 b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; } constexpr dim3 operator-(dim3 a, dim3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; return a; } constexpr dim3 operator%(dim3 a, dim3 b) { a.x %= b.x; a.y %= b.y; a.z %= b.z; return a; } constexpr dim3 operator/(dim3 a, dim3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; return a; } static std::unordered_map<const void*,dim3> threads; static std::unordered_map<const void*,size_t> dyn_shared; static void launchRayMarch(hipSurfaceObject_t surface, dim3 bounds, size_t frame, const void *userData, hipStream_t stream) { hipLaunchKernelGGL(( marchRay), dim3(CHUNKCOUNT(bounds,threads[userData])),dim3(threads[userData]),dyn_shared[userData],stream, surface,bounds,frame,userData); } static std::unordered_map<const void*,preFrameFunc> pf; static clock_t start,current=0,last; constexpr static float cps=CLOCKS_PER_SEC; static float frames=0,fb[2]; static int preframeF(size_t frame, const void *data, hipStream_t stream) { clock_t c=clock(); fb[1]=cps/(c-last); last=c; c-=start; c/=CLOCKS_PER_SEC; if(c>current) { fb[0]=frames; frames=0; current=c; } hipMemcpyToSymbolAsync(framedata,&fb,sizeof(float)*2,0,hipMemcpyHostToDevice,stream); frames+=1; dyn_shared[data]=sizeof(minStore_t)*threads[data].x*threads[data].y*threads[data].z*static_cast<const world_t*>(data)->shapeCount; preFrameFunc f=pf[data]; if(f) return f(frame,data,stream); return 0; } hipError_t autoRenderShapes(SDL_Window *win, world_t *world, postFrameFunc postframe, preFrameFunc preframe, eventFunc eventFunction, unsigned x_threads, unsigned y_threads) { pf[world]=preframe; threads[world]=dim3(x_threads,y_threads,1); hipError_t err=autoDrawCUDA(win,launchRayMarch,postframe,preframeF,eventFunction,world); return err; }
19eccbc1f0bc67da04e58674bb7511aafc76d828.cu
/* * raymarching.cu * * Created on: 31.07.2019 * Author: Megacrafter127 */ #include "raymarching.cuh" #include <cassert> #include <unordered_map> //__constant__ static float4 green={0,1,0,1}; //__constant__ static int3 overlayoffset={0,0,0}; __constant__ static float framedata[2]; //__constant__ static unsigned precisions[2]={0,2}; __device__ static constexpr scalarType collisionDistance(scalarType totalDist,scalarType multiplier) { return multiplier*totalDist*SCL_EPSILON; } __host__ __device__ static constexpr float4 &operator+=(float4 &a, float4 b) { a.x+=b.x; a.y+=b.y; a.z+=b.z; a.w+=b.w; return a; } typedef struct { vectorType pos; scalarType dist,len; } minStore_t; __host__ __device__ inline size_t idx(uint3 i, dim3 b, size_t j) { return i.x+b.x*(i.y+b.y*(i.z+b.z*j)); } __shared__ extern minStore_t mins[]; template<typename T> __device__ inline void simpleSurf2Dwrite(register T data, register cudaSurfaceObject_t surface, register int x, register int y, register cudaSurfaceBoundaryMode mode = cudaBoundaryModeZero) { return surf2Dwrite(data, surface, x*sizeof(T), y, mode); } __device__ static constexpr uint3 operator+(uint3 a, uint3 b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; } __device__ static constexpr uint3 operator*(uint3 a, dim3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; return a; } __device__ static constexpr uint3 operator*(dim3 a, uint3 b) { return b*a; } __device__ static constexpr int3 operator-(uint3 a, dim3 b) { int3 ret{}; ret.x=a.x-b.x; ret.y=a.y-b.y; ret.z=a.z-b.z; return ret; } __device__ static constexpr dim3 operator/(dim3 a, unsigned scalar) { a.x/=scalar; a.y/=scalar; a.z/=scalar; return a; } __global__ static void marchRay(cudaSurfaceObject_t surf, dim3 bounds, size_t frame, const void *data) { const register uint3 pos = threadIdx+blockDim*blockIdx; __shared__ world_t world; if((threadIdx.x|threadIdx.y|threadIdx.z)==0) { memcpy(&world,data,sizeof(world_t)); } __syncthreads(); register scalarType totalDist=0._s,divergence; register vectorType start=world.camera.pos; register vectorType ray=world.camera.rays(divergence,pos-bounds/2,frame); ray/=norm(ray); register size_t step=0; for(size_t i=0;i<world.shapeCount;i++) { mins[idx(threadIdx,blockDim,i)].pos=start; mins[idx(threadIdx,blockDim,i)].dist=0._s; mins[idx(threadIdx,blockDim,i)].len=INFINITY; } register int end=0; for(;totalDist*SCL_EPSILON < world.maxErr;step++) { const register scalarType colDist=collisionDistance(totalDist,world.collisionMultiplier); register scalarType minDist=INFINITY; for(size_t i=0;i<world.shapeCount;i++) { const register scalarType dist=world.shapes[i].getDistance(start,frame); if(dist<minDist) minDist=dist; if(dist<colDist) end|=1; if(dist<mins[idx(threadIdx,blockDim,i)].len) { mins[idx(threadIdx,blockDim,i)].len=dist; mins[idx(threadIdx,blockDim,i)].pos=start; mins[idx(threadIdx,blockDim,i)].dist=totalDist; } } totalDist+=minDist; start+=ray*minDist; if(end) { break; } } register float4 color=world.background; for(size_t i=0;i<world.shapeCount;i++) { const register scalarType distance=world.shapes[i].getDistance(start,frame); register float4 c=world.shapes[i].getColor(frame, distance<collisionDistance(totalDist,world.collisionMultiplier), start,totalDist,distance,step, mins[idx(threadIdx,blockDim,i)].len, world.shapes[i].getDistance(mins[idx(threadIdx,blockDim,i)].pos,frame), mins[idx(threadIdx,blockDim,i)].dist); c.x*=c.w; c.y*=c.w; c.z*=c.w; color+=c; } if(color.w>0 || color.w<0) { color.x/=color.w; color.y/=color.w; color.z/=color.w; color.w=1; } else { color.x=0; color.y=0; 
color.z=0; } simpleSurf2Dwrite(color,surf,pos.x,pos.y); //return overlayNumbers(color,pos,green,overlayoffset,2,precisions,framedata,2); } #define CHUNKIFY(len,chunk) (len+(chunk-len%chunk)%chunk) #define CHUNKCOUNT(len,chunk) (CHUNKIFY(len,chunk)/chunk) constexpr dim3 operator+(dim3 a, dim3 b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; } constexpr dim3 operator-(dim3 a, dim3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; return a; } constexpr dim3 operator%(dim3 a, dim3 b) { a.x %= b.x; a.y %= b.y; a.z %= b.z; return a; } constexpr dim3 operator/(dim3 a, dim3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; return a; } static std::unordered_map<const void*,dim3> threads; static std::unordered_map<const void*,size_t> dyn_shared; static void launchRayMarch(cudaSurfaceObject_t surface, dim3 bounds, size_t frame, const void *userData, cudaStream_t stream) { marchRay<<<CHUNKCOUNT(bounds,threads[userData]),threads[userData],dyn_shared[userData],stream>>>(surface,bounds,frame,userData); } static std::unordered_map<const void*,preFrameFunc> pf; static clock_t start,current=0,last; constexpr static float cps=CLOCKS_PER_SEC; static float frames=0,fb[2]; static int preframeF(size_t frame, const void *data, cudaStream_t stream) { clock_t c=clock(); fb[1]=cps/(c-last); last=c; c-=start; c/=CLOCKS_PER_SEC; if(c>current) { fb[0]=frames; frames=0; current=c; } cudaMemcpyToSymbolAsync(framedata,&fb,sizeof(float)*2,0,cudaMemcpyHostToDevice,stream); frames+=1; dyn_shared[data]=sizeof(minStore_t)*threads[data].x*threads[data].y*threads[data].z*static_cast<const world_t*>(data)->shapeCount; preFrameFunc f=pf[data]; if(f) return f(frame,data,stream); return 0; } cudaError_t autoRenderShapes(SDL_Window *win, world_t *world, postFrameFunc postframe, preFrameFunc preframe, eventFunc eventFunction, unsigned x_threads, unsigned y_threads) { pf[world]=preframe; threads[world]=dim3(x_threads,y_threads,1); cudaError_t err=autoDrawCUDA(win,launchRayMarch,postframe,preframeF,eventFunction,world); return err; }
ad3798c4c5a38b9f3aaa372712c13a942138be78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===---------- target_impl.cu - NVPTX OpenMP GPU options ------- CUDA -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Definitions of target specific functions // //===----------------------------------------------------------------------===// #pragma omp declare target #include "common/debug.h" #include "target_impl.h" #include "target_interface.h" EXTERN void __kmpc_impl_unpack(uint64_t val, uint32_t &lo, uint32_t &hi) { asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val)); } EXTERN uint64_t __kmpc_impl_pack(uint32_t lo, uint32_t hi) { uint64_t val; asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi)); return val; } EXTERN __kmpc_impl_lanemask_t __kmpc_impl_lanemask_lt() { __kmpc_impl_lanemask_t res; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(res)); return res; } EXTERN __kmpc_impl_lanemask_t __kmpc_impl_lanemask_gt() { __kmpc_impl_lanemask_t res; asm("mov.u32 %0, %%lanemask_gt;" : "=r"(res)); return res; } EXTERN uint32_t __kmpc_impl_smid() { uint32_t id; asm("mov.u32 %0, %%smid;" : "=r"(id)); return id; } EXTERN double __kmpc_impl_get_wtick() { // Timer precision is 1ns return ((double)1E-9); } EXTERN double __kmpc_impl_get_wtime() { unsigned long long nsecs; asm("mov.u64 %0, %%globaltimer;" : "=l"(nsecs)); return (double)nsecs * __kmpc_impl_get_wtick(); } EXTERN __kmpc_impl_lanemask_t __kmpc_impl_activemask() { unsigned int Mask; asm volatile("activemask.b32 %0;" : "=r"(Mask)); return Mask; } EXTERN void __kmpc_impl_syncthreads() { __syncthreads(); } EXTERN void __kmpc_impl_syncwarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); } // NVPTX specific kernel initialization EXTERN void __kmpc_impl_target_init() { /* nvptx needs no extra setup */ } // Barrier until num_threads arrive. EXTERN void __kmpc_impl_named_sync(uint32_t num_threads) { // The named barrier for active parallel threads of a team in an L1 parallel // region to synchronize with each other. 
int barrier = 1; asm volatile("bar.sync %0, %1;" : : "r"(barrier), "r"(num_threads) : "memory"); } EXTERN void __kmpc_impl_threadfence() { __nvvm_membar_gl(); } EXTERN void __kmpc_impl_threadfence_block() { __nvvm_membar_cta(); } EXTERN void __kmpc_impl_threadfence_system() { __nvvm_membar_sys(); } // Calls to the NVPTX layer (assuming 1D layout) EXTERN int GetThreadIdInBlock() { return __nvvm_read_ptx_sreg_tid_x(); } EXTERN int GetBlockIdInKernel() { return __nvvm_read_ptx_sreg_ctaid_x(); } EXTERN int GetNumberOfBlocksInKernel() { return __nvvm_read_ptx_sreg_nctaid_x(); } EXTERN int GetNumberOfThreadsInBlock() { return __nvvm_read_ptx_sreg_ntid_x(); } EXTERN unsigned GetWarpId() { return GetThreadIdInBlock() / WARPSIZE; } EXTERN unsigned GetWarpSize() { return WARPSIZE; } EXTERN unsigned GetLaneId() { return GetThreadIdInBlock() & (WARPSIZE - 1); } // Atomics uint32_t __kmpc_atomic_add(uint32_t *Address, uint32_t Val) { return __atomic_fetch_add(Address, Val, __ATOMIC_SEQ_CST); } uint32_t __kmpc_atomic_inc(uint32_t *Address, uint32_t Val) { return __nvvm_atom_inc_gen_ui(Address, Val); } uint32_t __kmpc_atomic_max(uint32_t *Address, uint32_t Val) { return __atomic_fetch_max(Address, Val, __ATOMIC_SEQ_CST); } uint32_t __kmpc_atomic_exchange(uint32_t *Address, uint32_t Val) { uint32_t R; __atomic_exchange(Address, &Val, &R, __ATOMIC_SEQ_CST); return R; } uint32_t __kmpc_atomic_cas(uint32_t *Address, uint32_t Compare, uint32_t Val) { (void)__atomic_compare_exchange(Address, &Compare, &Val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); return Compare; } unsigned long long __kmpc_atomic_exchange(unsigned long long *Address, unsigned long long Val) { unsigned long long R; __atomic_exchange(Address, &Val, &R, __ATOMIC_SEQ_CST); return R; } unsigned long long __kmpc_atomic_add(unsigned long long *Address, unsigned long long Val) { return __atomic_fetch_add(Address, Val, __ATOMIC_SEQ_CST); } #define __OMP_SPIN 1000 #define UNSET 0u #define SET 1u EXTERN void __kmpc_impl_init_lock(omp_lock_t *lock) { __kmpc_impl_unset_lock(lock); } EXTERN void __kmpc_impl_destroy_lock(omp_lock_t *lock) { __kmpc_impl_unset_lock(lock); } EXTERN void __kmpc_impl_set_lock(omp_lock_t *lock) { // TODO: not sure spinning is a good idea here.. while (__kmpc_atomic_cas(lock, UNSET, SET) != UNSET) { int32_t start = __nvvm_read_ptx_sreg_clock(); int32_t now; for (;;) { now = __nvvm_read_ptx_sreg_clock(); int32_t cycles = now > start ? now - start : now + (0xffffffff - start); if (cycles >= __OMP_SPIN * GetBlockIdInKernel()) { break; } } } // wait for 0 to be the read value } EXTERN void __kmpc_impl_unset_lock(omp_lock_t *lock) { (void)__kmpc_atomic_exchange(lock, UNSET); } EXTERN int __kmpc_impl_test_lock(omp_lock_t *lock) { return __kmpc_atomic_add(lock, 0u); } EXTERN void *__kmpc_impl_malloc(size_t x) { return malloc(x); } EXTERN void __kmpc_impl_free(void *x) { free(x); } #pragma omp end declare target
ad3798c4c5a38b9f3aaa372712c13a942138be78.cu
//===---------- target_impl.cu - NVPTX OpenMP GPU options ------- CUDA -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Definitions of target specific functions // //===----------------------------------------------------------------------===// #pragma omp declare target #include "common/debug.h" #include "target_impl.h" #include "target_interface.h" EXTERN void __kmpc_impl_unpack(uint64_t val, uint32_t &lo, uint32_t &hi) { asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val)); } EXTERN uint64_t __kmpc_impl_pack(uint32_t lo, uint32_t hi) { uint64_t val; asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi)); return val; } EXTERN __kmpc_impl_lanemask_t __kmpc_impl_lanemask_lt() { __kmpc_impl_lanemask_t res; asm("mov.u32 %0, %%lanemask_lt;" : "=r"(res)); return res; } EXTERN __kmpc_impl_lanemask_t __kmpc_impl_lanemask_gt() { __kmpc_impl_lanemask_t res; asm("mov.u32 %0, %%lanemask_gt;" : "=r"(res)); return res; } EXTERN uint32_t __kmpc_impl_smid() { uint32_t id; asm("mov.u32 %0, %%smid;" : "=r"(id)); return id; } EXTERN double __kmpc_impl_get_wtick() { // Timer precision is 1ns return ((double)1E-9); } EXTERN double __kmpc_impl_get_wtime() { unsigned long long nsecs; asm("mov.u64 %0, %%globaltimer;" : "=l"(nsecs)); return (double)nsecs * __kmpc_impl_get_wtick(); } EXTERN __kmpc_impl_lanemask_t __kmpc_impl_activemask() { unsigned int Mask; asm volatile("activemask.b32 %0;" : "=r"(Mask)); return Mask; } EXTERN void __kmpc_impl_syncthreads() { __syncthreads(); } EXTERN void __kmpc_impl_syncwarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); } // NVPTX specific kernel initialization EXTERN void __kmpc_impl_target_init() { /* nvptx needs no extra setup */ } // Barrier until num_threads arrive. EXTERN void __kmpc_impl_named_sync(uint32_t num_threads) { // The named barrier for active parallel threads of a team in an L1 parallel // region to synchronize with each other. 
int barrier = 1; asm volatile("bar.sync %0, %1;" : : "r"(barrier), "r"(num_threads) : "memory"); } EXTERN void __kmpc_impl_threadfence() { __nvvm_membar_gl(); } EXTERN void __kmpc_impl_threadfence_block() { __nvvm_membar_cta(); } EXTERN void __kmpc_impl_threadfence_system() { __nvvm_membar_sys(); } // Calls to the NVPTX layer (assuming 1D layout) EXTERN int GetThreadIdInBlock() { return __nvvm_read_ptx_sreg_tid_x(); } EXTERN int GetBlockIdInKernel() { return __nvvm_read_ptx_sreg_ctaid_x(); } EXTERN int GetNumberOfBlocksInKernel() { return __nvvm_read_ptx_sreg_nctaid_x(); } EXTERN int GetNumberOfThreadsInBlock() { return __nvvm_read_ptx_sreg_ntid_x(); } EXTERN unsigned GetWarpId() { return GetThreadIdInBlock() / WARPSIZE; } EXTERN unsigned GetWarpSize() { return WARPSIZE; } EXTERN unsigned GetLaneId() { return GetThreadIdInBlock() & (WARPSIZE - 1); } // Atomics uint32_t __kmpc_atomic_add(uint32_t *Address, uint32_t Val) { return __atomic_fetch_add(Address, Val, __ATOMIC_SEQ_CST); } uint32_t __kmpc_atomic_inc(uint32_t *Address, uint32_t Val) { return __nvvm_atom_inc_gen_ui(Address, Val); } uint32_t __kmpc_atomic_max(uint32_t *Address, uint32_t Val) { return __atomic_fetch_max(Address, Val, __ATOMIC_SEQ_CST); } uint32_t __kmpc_atomic_exchange(uint32_t *Address, uint32_t Val) { uint32_t R; __atomic_exchange(Address, &Val, &R, __ATOMIC_SEQ_CST); return R; } uint32_t __kmpc_atomic_cas(uint32_t *Address, uint32_t Compare, uint32_t Val) { (void)__atomic_compare_exchange(Address, &Compare, &Val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); return Compare; } unsigned long long __kmpc_atomic_exchange(unsigned long long *Address, unsigned long long Val) { unsigned long long R; __atomic_exchange(Address, &Val, &R, __ATOMIC_SEQ_CST); return R; } unsigned long long __kmpc_atomic_add(unsigned long long *Address, unsigned long long Val) { return __atomic_fetch_add(Address, Val, __ATOMIC_SEQ_CST); } #define __OMP_SPIN 1000 #define UNSET 0u #define SET 1u EXTERN void __kmpc_impl_init_lock(omp_lock_t *lock) { __kmpc_impl_unset_lock(lock); } EXTERN void __kmpc_impl_destroy_lock(omp_lock_t *lock) { __kmpc_impl_unset_lock(lock); } EXTERN void __kmpc_impl_set_lock(omp_lock_t *lock) { // TODO: not sure spinning is a good idea here.. while (__kmpc_atomic_cas(lock, UNSET, SET) != UNSET) { int32_t start = __nvvm_read_ptx_sreg_clock(); int32_t now; for (;;) { now = __nvvm_read_ptx_sreg_clock(); int32_t cycles = now > start ? now - start : now + (0xffffffff - start); if (cycles >= __OMP_SPIN * GetBlockIdInKernel()) { break; } } } // wait for 0 to be the read value } EXTERN void __kmpc_impl_unset_lock(omp_lock_t *lock) { (void)__kmpc_atomic_exchange(lock, UNSET); } EXTERN int __kmpc_impl_test_lock(omp_lock_t *lock) { return __kmpc_atomic_add(lock, 0u); } EXTERN void *__kmpc_impl_malloc(size_t x) { return malloc(x); } EXTERN void __kmpc_impl_free(void *x) { free(x); } #pragma omp end declare target
0f05a0346530ecf6c26a55c973bd2f5246c94377.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "mpz.h" __device__ mpz_t* REDC(int RL, mpz_t* N, mpz_t* N_, mpz_t* T, mpz_t* tmp, mpz_t* t){//mpz_t* RMOD, int L, mpz_t* N, mpz_t* N_ should not be changed. //m = ((T & R) * N_) & R mpz_bitwise_truncate(t, T, RL); mpz_mult(tmp, N_, t); mpz_bitwise_truncate_eq(tmp, RL); //t = (T + m*N) >> L mpz_mult(t, tmp , N); mpz_add(tmp, T, t); mpz_bitwise_rshift(t, tmp, RL); if (mpz_gte(t , N)){ mpz_sub(tmp, t, N); mpz_set(t, tmp); return t; } else{ mpz_sub(tmp, t, N); mpz_set(tmp, t); return t; } } __global__ void MontSQMLadder(mpz_t * mes1, long long unsigned pairs, mpz_t* _x1, mpz_t* _x2, mpz_t* tmp, mpz_t* tmp2, int rl, mpz_t r2, mpz_t vn, mpz_t vn_, int* eBits, int eLength, long long int* clockTable, mpz_t* t) { __shared__ digit_t s_index[32]; long long int t1, t2; int k = blockIdx.x * blockDim.x + threadIdx.x; //to accelerate the experiment, we put all messages in one kernel launch. In the real case, each message causes one kernel launch. for(long long unsigned iter1 = 0; iter1 < pairs; iter1++){ mpz_set(&_x1[k], &mes1[2 * iter1 + k]);//next _x1 access will cause L1 miss if the L1 policy is write evict, same as using mutiple kernels. s_index[k] = mpz_get_last_digit(&_x1[k]);//make a dependency to make sure previous store is finished. t1 = clock64();//beginning of necessary instructions within the kernel mpz_t* n = &vn; mpz_t* n_ = &vn_; int j = blockIdx.x * blockDim.x + threadIdx.x; //_x1 = REDC(rmod,n,n_,mes*r2,l) mpz_mult(&tmp2[j], &_x1[j], &r2); mpz_set( &_x1[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); //x2 = _x1 * _x1 mpz_mult(&tmp2[j], &_x1[j], &t[j]); //_x2 = REDC(rmod,n,n_,_x2,l) mpz_set( &_x2[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); // if(j == 0){ // mpz_print_str_device(&_x1[j]); // printf(" "); // mpz_print_str_device(&_x2[j]); // printf("\n"); // } for(int i = eLength - 2; i >= 0; i--){ if(eBits[i] == 0){ //x2 = _x1 * _x2 mpz_mult(&tmp2[j], &_x1[j], &_x2[j]); //_x2 = REDC(rmod,n,n_,_x2,l) mpz_set( &_x2[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); //_x1 = _x1 * _x1 mpz_set( &tmp[j], &_x1[j]); mpz_mult(&tmp2[j], &_x1[j], &tmp[j]); //_x1 = REDC(rmod,n,n_,_x1,l) mpz_set( &_x1[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); } else { //_x1 = _x1 * _x2 mpz_mult(&tmp2[j], &_x1[j], &_x2[j]); //_x1 = REDC(rmod,n,n_,_x1,l) #changes: more efficient mpz_set( &_x1[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); //_x2 = _x2 * _x2 mpz_set( &tmp[j], &_x2[j]); mpz_mult(&tmp2[j], &_x2[j], &tmp[j]); //_x2 = REDC(rmod,n,n_,_x2,l) #changes: more efficient mpz_set( &_x2[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); } } //_x1 = REDC(rmod,n,n_,_x1,l) mpz_set( &_x1[j], REDC(rl, n, n_, &_x1[j], &tmp[j], &t[j]) ); s_index[k] = mpz_get_last_digit(&_x1[k]);//make a dependency to make sure previous store is finished. t2 = clock64();//end of necessary kernel instructions if( j == 1){ clockTable[iter1] = t2 - t1; } } } __global__ void init(mpz_t* _x1, mpz_t* _x2, mpz_t* tmp, mpz_t* tmp2, mpz_t* t){ int j = blockIdx.x * blockDim.x + threadIdx.x; mpz_init(&tmp[j]);////initial value not used mpz_init(&tmp2[j]);////initial value not used mpz_init(&_x1[j]);////initial value required mpz_init(&_x2[j]);////initial value required mpz_init(&t[j]);////initial value not used }
0f05a0346530ecf6c26a55c973bd2f5246c94377.cu
#include <stdio.h> #include <stdlib.h> #include "mpz.h" __device__ mpz_t* REDC(int RL, mpz_t* N, mpz_t* N_, mpz_t* T, mpz_t* tmp, mpz_t* t){//mpz_t* RMOD, int L, mpz_t* N, mpz_t* N_ should not be changed. //m = ((T & R) * N_) & R mpz_bitwise_truncate(t, T, RL); mpz_mult(tmp, N_, t); mpz_bitwise_truncate_eq(tmp, RL); //t = (T + m*N) >> L mpz_mult(t, tmp , N); mpz_add(tmp, T, t); mpz_bitwise_rshift(t, tmp, RL); if (mpz_gte(t , N)){ mpz_sub(tmp, t, N); mpz_set(t, tmp); return t; } else{ mpz_sub(tmp, t, N); mpz_set(tmp, t); return t; } } __global__ void MontSQMLadder(mpz_t * mes1, long long unsigned pairs, mpz_t* _x1, mpz_t* _x2, mpz_t* tmp, mpz_t* tmp2, int rl, mpz_t r2, mpz_t vn, mpz_t vn_, int* eBits, int eLength, long long int* clockTable, mpz_t* t) { __shared__ digit_t s_index[32]; long long int t1, t2; int k = blockIdx.x * blockDim.x + threadIdx.x; //to accelerate the experiment, we put all messages in one kernel launch. In the real case, each message causes one kernel launch. for(long long unsigned iter1 = 0; iter1 < pairs; iter1++){ mpz_set(&_x1[k], &mes1[2 * iter1 + k]);//next _x1 access will cause L1 miss if the L1 policy is write evict, same as using mutiple kernels. s_index[k] = mpz_get_last_digit(&_x1[k]);//make a dependency to make sure previous store is finished. t1 = clock64();//beginning of necessary instructions within the kernel mpz_t* n = &vn; mpz_t* n_ = &vn_; int j = blockIdx.x * blockDim.x + threadIdx.x; //_x1 = REDC(rmod,n,n_,mes*r2,l) mpz_mult(&tmp2[j], &_x1[j], &r2); mpz_set( &_x1[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); //x2 = _x1 * _x1 mpz_mult(&tmp2[j], &_x1[j], &t[j]); //_x2 = REDC(rmod,n,n_,_x2,l) mpz_set( &_x2[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); // if(j == 0){ // mpz_print_str_device(&_x1[j]); // printf(" "); // mpz_print_str_device(&_x2[j]); // printf("\n"); // } for(int i = eLength - 2; i >= 0; i--){ if(eBits[i] == 0){ //x2 = _x1 * _x2 mpz_mult(&tmp2[j], &_x1[j], &_x2[j]); //_x2 = REDC(rmod,n,n_,_x2,l) mpz_set( &_x2[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); //_x1 = _x1 * _x1 mpz_set( &tmp[j], &_x1[j]); mpz_mult(&tmp2[j], &_x1[j], &tmp[j]); //_x1 = REDC(rmod,n,n_,_x1,l) mpz_set( &_x1[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); } else { //_x1 = _x1 * _x2 mpz_mult(&tmp2[j], &_x1[j], &_x2[j]); //_x1 = REDC(rmod,n,n_,_x1,l) #changes: more efficient mpz_set( &_x1[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); //_x2 = _x2 * _x2 mpz_set( &tmp[j], &_x2[j]); mpz_mult(&tmp2[j], &_x2[j], &tmp[j]); //_x2 = REDC(rmod,n,n_,_x2,l) #changes: more efficient mpz_set( &_x2[j], REDC(rl, n, n_, &tmp2[j], &tmp[j], &t[j]) ); } } //_x1 = REDC(rmod,n,n_,_x1,l) mpz_set( &_x1[j], REDC(rl, n, n_, &_x1[j], &tmp[j], &t[j]) ); s_index[k] = mpz_get_last_digit(&_x1[k]);//make a dependency to make sure previous store is finished. t2 = clock64();//end of necessary kernel instructions if( j == 1){ clockTable[iter1] = t2 - t1; } } } __global__ void init(mpz_t* _x1, mpz_t* _x2, mpz_t* tmp, mpz_t* tmp2, mpz_t* t){ int j = blockIdx.x * blockDim.x + threadIdx.x; mpz_init(&tmp[j]);////initial value not used mpz_init(&tmp2[j]);////initial value not used mpz_init(&_x1[j]);////initial value required mpz_init(&_x2[j]);////initial value required mpz_init(&t[j]);////initial value not used }
4c76cd338d1d7b5d4aa3b08c6f9a08aea4f1f420.hip
// !!! This is a file automatically generated by hipify!!! #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/blas/blas.h> #include <cusp/system/cuda/detail/cublas/blas.h> #include <iostream> #include <stdio.h> #include "../timer.h" template <typename T, typename MemorySpace=cusp::device_memory> struct test_nrm2 { cusp::array1d<T,MemorySpace> x; const size_t n; hipblasHandle_t handle; test_nrm2(const size_t n) : n(n), x(n) { if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } } ~test_nrm2() { hipblasDestroy(handle); } void operator()(void) { cusp::blas::nrm2(cusp::cuda::par.with(handle), x); } std::string name(void) const { return (sizeof(T) == 4) ? "snrm2" : "dnrm2"; } size_t bytes(void) const { return n * sizeof(T); } }; template <typename T, typename MemorySpace=cusp::device_memory> struct test_dot { cusp::array1d<T,MemorySpace> x, y; const size_t n; hipblasHandle_t handle; test_dot(const size_t n) : n(n), x(n), y(n) { if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } } ~test_dot() { hipblasDestroy(handle); } void operator()(void) { cusp::blas::dot(cusp::cuda::par.with(handle), x, y); } std::string name(void) const { return (sizeof(T) == 4) ? "sdot" : "ddot"; } size_t bytes(void) const { return 2 * n * sizeof(T); } }; template <typename T, typename MemorySpace=cusp::device_memory> struct test_axpy { cusp::array1d<T,MemorySpace> x, y; const size_t n; hipblasHandle_t handle; test_axpy(const size_t n) : n(n), x(n), y(n) { if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } } ~test_axpy() { hipblasDestroy(handle); } void operator()(void) { cusp::blas::axpy(cusp::cuda::par.with(handle), x, y, T(1.0)); } std::string name(void) const { return (sizeof(T) == 4) ? "saxpy" : "daxpy"; } size_t bytes(void) const { return 3 * n * sizeof(T); } }; template <typename T, typename MemorySpace=cusp::device_memory, typename Orientation=cusp::column_major> struct test_gemm { cusp::array2d<T,MemorySpace,Orientation> A, B; const size_t n; hipblasHandle_t handle; test_gemm(const size_t n) : n(n), A(n,n), B(n,n) { if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } } ~test_gemm() { hipblasDestroy(handle); } void operator()(void) { cusp::blas::gemm(cusp::cuda::par.with(handle), A, A, B); } std::string name(void) const { return (sizeof(T) == 4) ? 
"sgemm" : "dgemm"; } size_t bytes(void) const { return A.num_rows * B.num_rows * B.num_cols * sizeof(T); } }; template <typename Test> void benchmark(const size_t n, const size_t iterations = 5) { Test test(n); test(); timer t0; for(size_t i = 0; i < iterations; i++) test(); float ms = t0.milliseconds_elapsed() / iterations; float bw = (test.bytes() / (ms / 1e3)) / 1e9; printf("%-10s %6.1f GB/s [ %8.3f ms]\n", test.name().c_str(), bw, ms); } int main(int argc, char ** argv) { for (size_t e = 16; e < 27; e++) { size_t n = 1 << e; std::cout << "N = " << n << std::endl; benchmark< test_nrm2<float> >(n); benchmark< test_nrm2<double> >(n); benchmark< test_dot <float> >(n); benchmark< test_dot <double> >(n); benchmark< test_axpy<float> >(n); benchmark< test_axpy<double> >(n); } printf("\n"); for (size_t n = 900; n < 1500; n += 100) { std::cout << "N = " << n << std::endl; benchmark< test_gemm<float, cusp::device_memory, cusp::row_major> >(n); benchmark< test_gemm<float, cusp::device_memory, cusp::column_major> >(n); benchmark< test_gemm<double, cusp::device_memory, cusp::row_major> >(n); benchmark< test_gemm<double, cusp::device_memory, cusp::column_major> >(n); } return 0; }
4c76cd338d1d7b5d4aa3b08c6f9a08aea4f1f420.cu
#include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/blas/blas.h> #include <cusp/system/cuda/detail/cublas/blas.h> #include <iostream> #include <stdio.h> #include "../timer.h" template <typename T, typename MemorySpace=cusp::device_memory> struct test_nrm2 { cusp::array1d<T,MemorySpace> x; const size_t n; cublasHandle_t handle; test_nrm2(const size_t n) : n(n), x(n) { if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } } ~test_nrm2() { cublasDestroy(handle); } void operator()(void) { cusp::blas::nrm2(cusp::cuda::par.with(handle), x); } std::string name(void) const { return (sizeof(T) == 4) ? "snrm2" : "dnrm2"; } size_t bytes(void) const { return n * sizeof(T); } }; template <typename T, typename MemorySpace=cusp::device_memory> struct test_dot { cusp::array1d<T,MemorySpace> x, y; const size_t n; cublasHandle_t handle; test_dot(const size_t n) : n(n), x(n), y(n) { if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } } ~test_dot() { cublasDestroy(handle); } void operator()(void) { cusp::blas::dot(cusp::cuda::par.with(handle), x, y); } std::string name(void) const { return (sizeof(T) == 4) ? "sdot" : "ddot"; } size_t bytes(void) const { return 2 * n * sizeof(T); } }; template <typename T, typename MemorySpace=cusp::device_memory> struct test_axpy { cusp::array1d<T,MemorySpace> x, y; const size_t n; cublasHandle_t handle; test_axpy(const size_t n) : n(n), x(n), y(n) { if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } } ~test_axpy() { cublasDestroy(handle); } void operator()(void) { cusp::blas::axpy(cusp::cuda::par.with(handle), x, y, T(1.0)); } std::string name(void) const { return (sizeof(T) == 4) ? "saxpy" : "daxpy"; } size_t bytes(void) const { return 3 * n * sizeof(T); } }; template <typename T, typename MemorySpace=cusp::device_memory, typename Orientation=cusp::column_major> struct test_gemm { cusp::array2d<T,MemorySpace,Orientation> A, B; const size_t n; cublasHandle_t handle; test_gemm(const size_t n) : n(n), A(n,n), B(n,n) { if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } } ~test_gemm() { cublasDestroy(handle); } void operator()(void) { cusp::blas::gemm(cusp::cuda::par.with(handle), A, A, B); } std::string name(void) const { return (sizeof(T) == 4) ? 
"sgemm" : "dgemm"; } size_t bytes(void) const { return A.num_rows * B.num_rows * B.num_cols * sizeof(T); } }; template <typename Test> void benchmark(const size_t n, const size_t iterations = 5) { Test test(n); test(); timer t0; for(size_t i = 0; i < iterations; i++) test(); float ms = t0.milliseconds_elapsed() / iterations; float bw = (test.bytes() / (ms / 1e3)) / 1e9; printf("%-10s %6.1f GB/s [ %8.3f ms]\n", test.name().c_str(), bw, ms); } int main(int argc, char ** argv) { for (size_t e = 16; e < 27; e++) { size_t n = 1 << e; std::cout << "N = " << n << std::endl; benchmark< test_nrm2<float> >(n); benchmark< test_nrm2<double> >(n); benchmark< test_dot <float> >(n); benchmark< test_dot <double> >(n); benchmark< test_axpy<float> >(n); benchmark< test_axpy<double> >(n); } printf("\n"); for (size_t n = 900; n < 1500; n += 100) { std::cout << "N = " << n << std::endl; benchmark< test_gemm<float, cusp::device_memory, cusp::row_major> >(n); benchmark< test_gemm<float, cusp::device_memory, cusp::column_major> >(n); benchmark< test_gemm<double, cusp::device_memory, cusp::row_major> >(n); benchmark< test_gemm<double, cusp::device_memory, cusp::column_major> >(n); } return 0; }
8e795dbde293d9ff5c150cd9ab3f66793e726a33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "star3d3r-32x32-2-128_kernel.hu" #define BENCH_DIM 3 #define BENCH_FPP 37 #define BENCH_RAD 3 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { if (dimsize >= 7 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.25000f * A[t%2][i][j][k] + 0.04276f * A[t%2][i][j][k-3] + 0.04176f * 
A[t%2][i][j][k-2] + 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1] + 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3] + 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k] + 0.04086f * A[t%2][i][j-1][k] + 0.04056f * A[t%2][i][j+1][k] + 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k] + 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k] + 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k] + 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
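The hard-coded block constants above are where the kernel file's 32x32-2-128 name comes from. Spelled out for the fused-two-step pass (this reading of the generated constants is inferred, not stated in the source):

  halo                = 3              (BENCH_RAD)
  __side0Len          = 2              (time steps fused per pass)
  __OlLen2 = __OlLen3 = 3 * 2 = 6      (overlap consumed per side)
  __side2LenOl        = 20 + 2*6 = 32
  __side3LenOl        = 20 + 2*6 = 32
  __blockSize         = 32 * 32 = 1024 threads per block
  __side1Len          = 128            (streamed dimension)

The single-step cleanup passes use __side2Len = __side3Len = 26, which with a halo of 3 again lands on a 32 x 32 = 1024-thread block, so every launch in this file uses the same block size.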
8e795dbde293d9ff5c150cd9ab3f66793e726a33.cu
#include <assert.h> #include <stdio.h> #include "star3d3r-32x32-2-128_kernel.hu" #define BENCH_DIM 3 #define BENCH_FPP 37 #define BENCH_RAD 3 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { if (dimsize >= 7 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const 
AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.25000f * A[t%2][i][j][k] + 0.04276f * A[t%2][i][j][k-3] + 0.04176f * A[t%2][i][j][k-2] + 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1] + 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3] + 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k] + 0.04086f * A[t%2][i][j-1][k] + 0.04056f * 
A[t%2][i][j+1][k] + 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k] + 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k] + 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k] + 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
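The tiled kernels kernel0_1/kernel0_2 declared in star3d3r-32x32-2-128_kernel.hu are not included in this pair. For reference, a plain one-time-step CUDA kernel for the same radius-3, 19-point star stencil, with the coefficients copied from the CPU fallback above, could look like the sketch below; it is an illustration of the computation being tiled, not the generated code.

// Illustrative naive kernel: one time step, no temporal blocking or streaming.
__global__ void star3d3r_naive(const double *in, double *out, int dimsize)
{
  int k = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int i = blockIdx.z * blockDim.z + threadIdx.z;
  if (i < 3 || j < 3 || k < 3 ||
      i >= dimsize - 3 || j >= dimsize - 3 || k >= dimsize - 3)
    return;

  // Flattened index for A[i][j][k] in a dimsize^3 array.
  #define IDX(i, j, k) (((size_t)(i) * dimsize + (j)) * dimsize + (k))
  out[IDX(i, j, k)] =
      0.25000 * in[IDX(i, j, k)]
    + 0.04276 * in[IDX(i, j, k - 3)] + 0.04176 * in[IDX(i, j, k - 2)]
    + 0.04076 * in[IDX(i, j, k - 1)] + 0.04046 * in[IDX(i, j, k + 1)]
    + 0.04146 * in[IDX(i, j, k + 2)] + 0.04246 * in[IDX(i, j, k + 3)]
    + 0.04286 * in[IDX(i, j - 3, k)] + 0.04186 * in[IDX(i, j - 2, k)]
    + 0.04086 * in[IDX(i, j - 1, k)] + 0.04056 * in[IDX(i, j + 1, k)]
    + 0.04156 * in[IDX(i, j + 2, k)] + 0.04256 * in[IDX(i, j + 3, k)]
    + 0.04296 * in[IDX(i - 3, j, k)] + 0.04196 * in[IDX(i - 2, j, k)]
    + 0.04096 * in[IDX(i - 1, j, k)] + 0.04066 * in[IDX(i + 1, j, k)]
    + 0.04166 * in[IDX(i + 2, j, k)] + 0.04266 * in[IDX(i + 3, j, k)];
  #undef IDX
}

A straightforward launch would use a 3-D block such as dim3(32, 4, 4) with enough blocks to cover dimsize in each direction; the generated kernels instead fuse two time steps and stream along one axis, as set up by the host code above.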
8fa5f5f69fddc224f3622ed095c8f7c7ae18d396.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, hipStream_t str) { hipStream_t initial_stream; CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { 
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y, const int stride) { CUDA_KERNEL_LOOP(index, n) { y[index*stride] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y, const int stride) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y, stride); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y, const int stride) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y, stride); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* 
y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
8fa5f5f69fddc224f3622ed095c8f7c7ae18d396.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double* X, cudaStream_t str) { cudaStream_t initial_stream; CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, 
sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y, const int stride) { CUDA_KERNEL_LOOP(index, n) { y[index*stride] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y, const int stride) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y, stride); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y, const int stride) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y, stride); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, 
y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <typename Dtype> __global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = sqrt(a[index]); } } template <> void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void 
caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
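Every launch in this file goes through CAFFE_GET_BLOCKS and CAFFE_CUDA_NUM_THREADS, and every kernel body through CUDA_KERNEL_LOOP, none of which are defined here (in the Caffe tree they come from caffe/util/device_alternate.hpp via common.hpp). A minimal compatible sketch of what they expand to is given below; the exact thread count is an assumption, so check the header in the tree this file came from.

// Grid-stride loop over n elements: each thread handles index tid,
// tid + blockDim.x*gridDim.x, ... so any grid size covers the range.
#define CUDA_KERNEL_LOOP(i, n)                         \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;  \
       i < (n);                                        \
       i += blockDim.x * gridDim.x)

// Threads per block assumed by the launches above (Caffe uses a fixed value).
const int CAFFE_CUDA_NUM_THREADS = 512;

// Enough blocks to give every element at least one thread.
inline int CAFFE_GET_BLOCKS(const int n) {
  return (n + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

Because the loop is grid-stride, the exact block count only affects occupancy, not correctness of the element-wise kernels above.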
783adabb8d3e3d3db9327808ce5c47cd550352bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_setup.h" __device__ __inline__ double shfl_xor(double value, int const lane, int const warpsize) { return __hiloint2double(__shfl_xor(__double2hiint(value), lane, warpsize), __shfl_xor(__double2loint(value), lane, warpsize)); } __device__ double atomicAdd2(double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int *)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } extern "C" void cudaMemoryTest() { printf("cudamemorytest 0\n"); const unsigned int N = 1048576; const unsigned int bytes = N * sizeof(int); //int *h_a = (int*)malloc(bytes); int *h_a; int *d_a; HANDLE_ERROR(hipHostMalloc((void **)&h_a, bytes, hipHostMallocDefault)); //HANDLE_ERROR(hipHostMalloc((void**)&h_a, bytes)); HANDLE_ERROR(hipMalloc((void**)&d_a, bytes)); memset(h_a, 0, bytes); HANDLE_ERROR(hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(h_a, d_a, bytes, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipHostFree(h_a)); HANDLE_ERROR(hipFree(d_a)); printf("cudamemorytest 1\n"); return; } extern "C" int cuda_alloc_atom_info(int in_max_n_atoms_exbox, // int in_max_n_atom_array, int in_max_n_cells, int in_max_n_cell_pairs, int in_n_columns) { printf("cuda_alloc_atom_info\n"); printf("d_crd : %d \n", max_n_atom_array * 3 * sizeof(real_pw)); max_n_atoms_exbox = in_max_n_atoms_exbox; // max_n_atom_array = in_max_n_atom_array; max_n_cell_pairs = in_max_n_cell_pairs; max_n_cells = in_max_n_cells; HANDLE_ERROR(hipMalloc((void **)&d_crd_chg, max_n_atom_array * sizeof(real4))); HANDLE_ERROR(hipMalloc((void **)&d_cell_z, max_n_cells * sizeof(real2))); HANDLE_ERROR(hipMalloc((void **)&d_crd, max_n_atom_array * 3 * sizeof(real_pw))); HANDLE_ERROR(hipMalloc((void **)&d_charge_orig, max_n_atoms_exbox * sizeof(real_pw))); HANDLE_ERROR(hipMalloc((void **)&d_atomtype, max_n_atom_array * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&d_atomids, max_n_atom_array * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&d_atomids_rev, max_n_atoms_exbox * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&d_atomtype_orig, max_n_atoms_exbox * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&d_cell_pairs, max_n_cell_pairs * sizeof(CellPair))); HANDLE_ERROR(hipMalloc((void **)&d_cell_pairs_buf, max_n_cell_pairs * sizeof(CellPair))); HANDLE_ERROR(hipMalloc((void **)&d_idx_head_cell_pairs, (in_max_n_cells + 1) * sizeof(int))); HANDLE_ERROR(hipMalloc((void **)&d_idx_cell_column, (in_max_n_cells) * sizeof(int))); HANDLE_ERROR(hipHostMalloc((void **)&h_idx_cell_column, in_max_n_cells * sizeof(int), hipHostMallocDefault)); // HANDLE_ERROR( hipMalloc((void**)&d_cell_pair_removed, //(in_max_n_cells+1) * sizeof(int)) ); HANDLE_ERROR(hipMalloc((void **)&d_n_cell_pairs, (max_n_cells) * sizeof(int))); // HANDLE_ERROR( hipMalloc((void**)&d_grid_atom_index, //(max_n_ + 1) * sizeof(int)) ); HANDLE_ERROR(hipMalloc((void **)&d_energy, N_MULTI_WORK * 2 * sizeof(real_fc))); HANDLE_ERROR(hipMalloc((void **)&d_work, N_MULTI_WORK * max_n_atom_array * 3 * sizeof(real_fc))); HANDLE_ERROR(hipMalloc((void **)&d_idx_xy_head_cell, in_n_columns * sizeof(int))); HANDLE_ERROR(hipMemcpyToSymbol(D_MAX_N_CELL_PAIRS, &in_max_n_cell_pairs, sizeof(int))); return 0; } extern "C" int cuda_free_atom_info() { // printf("cuda_free_device_atom_info\n"); 
HANDLE_ERROR(hipFree(d_crd_chg)); HANDLE_ERROR(hipFree(d_cell_z)); HANDLE_ERROR(hipFree(d_crd)); HANDLE_ERROR(hipFree(d_atomids)); HANDLE_ERROR(hipFree(d_atomids_rev)); HANDLE_ERROR(hipFree(d_charge_orig)); HANDLE_ERROR(hipFree(d_atomtype)); HANDLE_ERROR(hipFree(d_atomtype_orig)); HANDLE_ERROR(hipFree(d_cell_pairs)); HANDLE_ERROR(hipFree(d_cell_pairs_buf)); HANDLE_ERROR(hipFree(d_idx_head_cell_pairs)); HANDLE_ERROR(hipFree(d_idx_cell_column)); HANDLE_ERROR(hipHostFree(h_idx_cell_column)); // HANDLE_ERROR( hipFree(d_cell_pair_removed) ); HANDLE_ERROR(hipFree(d_n_cell_pairs)); HANDLE_ERROR(hipFree(d_energy)); HANDLE_ERROR(hipFree(d_work)); HANDLE_ERROR(hipFree(d_idx_xy_head_cell)); // HANDLE_ERROR( hipFree(d_uni2cell_z)); // HANDLE_ERROR( hipFree(d_work_orig) ); return 0; } extern "C" int cuda_memcpy_htod_atomids(int *&h_atomids, int *&h_idx_xy_head_cell) { HANDLE_ERROR(hipMemset(d_atomids, -1, sizeof(int) * max_n_atom_array)); HANDLE_ERROR(hipMemcpy(d_atomids, h_atomids, n_atom_array * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy(d_idx_xy_head_cell, h_idx_xy_head_cell, (n_columns + 1) * sizeof(int), hipMemcpyHostToDevice)); return 0; } // cuda_memcpy_htod_atom_info // Arrays of charges and atomtypes of all atoms in the process are sent to // the device. extern "C" int cuda_memcpy_htod_atom_info(real_pw *&h_charge_orig, int *&h_atomtype_orig) { HANDLE_ERROR(hipMemcpy(d_charge_orig, h_charge_orig, n_atoms_system * sizeof(real_pw), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_atomtype_orig, h_atomtype_orig, n_atoms_system * sizeof(int), hipMemcpyHostToDevice)); return 0; } // cuda_memcpy_htod_crd // Sending nsgrid.crd to device //extern "C" int cuda_memcpy_htod_crd(real_pw *&h_crd) { extern "C" int cuda_memcpy_htod_crd(real_pw *&h_crd) { //HANDLE_ERROR(hipMemcpy(d_crd, h_crd, n_atom_array * 3 * sizeof(real_pw), hipMemcpyHostToDevice)); //printf("cuda_memcpy_htod_crd h_crd : %d\n",n_atom_array * 3 * sizeof(real_pw)); //cudaMemoryTest(); //HANDLE_ERROR(hipMemcpy(d_crd, h_crd, sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_crd, h_crd, n_atom_array * 3 * sizeof(real_pw), hipMemcpyHostToDevice)); //HANDLE_ERROR(hipMemcpy(d_crd, h_crd, n_atom_array * 3 * sizeof(real_pw), hipMemcpyHostToDevice)); // cudaMemoryTest(); return 0; } extern "C" int cuda_set_pbc(real_pw *l, real_pw *lb) { printf("cuda_set_pbc\n"); HANDLE_ERROR(hipMemcpyToSymbol(PBC_L, l, sizeof(real_pw) * 3)); real_pw l_inv[3]; for(int d=0; d<3; d++) l_inv[d] = 1.0/l[d]; HANDLE_ERROR(hipMemcpyToSymbol(PBC_L_INV, l_inv, sizeof(real_pw) * 3)); HANDLE_ERROR(hipMemcpyToSymbol(PBC_LOWER_BOUND, lb, sizeof(real_pw) * 3)); return 0; } extern "C" int cuda_zerodipole_constant(real_pw zcore, real_pw bcoeff, real_pw fcoeff) { HANDLE_ERROR(hipMemcpyToSymbol(D_ZCORE, &zcore, sizeof(real_pw))); HANDLE_ERROR(hipMemcpyToSymbol(D_BCOEFF, &bcoeff, sizeof(real_pw))); HANDLE_ERROR(hipMemcpyToSymbol(D_FCOEFF, &fcoeff, sizeof(real_pw))); return 0; } // cuda_set_cell_constant // These constants are updated when the cell grid is updated extern "C" int cuda_set_cell_constant(const int in_n_cells, const int in_n_atoms_exbox, const int in_n_atom_array, const int * in_n_cells_xyz, const int in_n_columns, const real_pw *in_l_cell_xyz, const int * in_n_neighbor_xyz) { n_atoms_exbox = in_n_atoms_exbox; n_cells = in_n_cells; n_atom_array = in_n_atom_array; n_columns = in_n_columns; HANDLE_ERROR(hipMemcpyToSymbol(D_N_CELLS, &n_cells, sizeof(int))); HANDLE_ERROR(hipMemcpyToSymbol(D_N_ATOM_ARRAY, &n_atom_array, sizeof(int))); 
HANDLE_ERROR(hipMemcpyToSymbol(D_N_CELLS_XYZ, in_n_cells_xyz, sizeof(int) * 3)); HANDLE_ERROR(hipMemcpyToSymbol(D_N_COLUMNS, &n_columns, sizeof(int))); HANDLE_ERROR(hipMemcpyToSymbol(D_L_CELL_XYZ, in_l_cell_xyz, sizeof(real_pw) * 3)); HANDLE_ERROR(hipMemcpyToSymbol(D_N_NEIGHBOR_XYZ, in_n_neighbor_xyz, sizeof(int) * 3)); const int n_neighbor_col = (in_n_neighbor_xyz[0] * 2 + 1) * (in_n_neighbor_xyz[1] * 2 + 1); HANDLE_ERROR(hipMemcpyToSymbol(D_N_NEIGHBOR_COL, &n_neighbor_col, sizeof(int))); const int max_n_cell_pairs_per_cell = max_n_cell_pairs / n_cells; HANDLE_ERROR(hipMemcpyToSymbol(D_MAX_N_CELL_PAIRS_PER_CELL, &max_n_cell_pairs_per_cell, sizeof(int))); //printf("dbg0420 const %d %d %d %d %d %d ", //n_cells, n_atom_array, //in_n_cells_xyz[0], in_n_cells_xyz[1], in_n_cells_xyz[2], //n_columns); //printf(" %f %f %f %d %d %d %d %d\n", //in_l_cell_xyz[0], in_l_cell_xyz[1], in_l_cell_xyz[2], //in_n_neighbor_xyz[0],in_n_neighbor_xyz[1],in_n_neighbor_xyz[2], //n_neighbor_col, max_n_cell_pairs_per_cell); return 0; } // cuda_set_constant // called only onece at the beginning of simulation extern "C" int cuda_set_constant(real_pw cutoff, real_pw cutoff_pairlist, int n_atomtypes) { real_pw tmp_charge_coeff = (real_pw)332.06378; // CelesteObject::CHARGE_COEFF; HANDLE_ERROR(hipMemcpyToSymbol(D_CHARGE_COEFF, &tmp_charge_coeff, sizeof(real_pw))); HANDLE_ERROR(hipMemcpyToSymbol(D_N_ATOMTYPES, &n_atomtypes, sizeof(int))); HANDLE_ERROR(hipMemcpyToSymbol(D_CUTOFF, &cutoff, sizeof(real_pw))); HANDLE_ERROR(hipMemcpyToSymbol(D_CUTOFF_PAIRLIST, &cutoff_pairlist, sizeof(real_pw))); const real_pw cutoff_pairlist_2 = cutoff_pairlist * cutoff_pairlist; HANDLE_ERROR(hipMemcpyToSymbol(D_CUTOFF_PAIRLIST_2, &cutoff_pairlist_2, sizeof(real_pw))); return 0; } extern "C" int cuda_alloc_set_lj_params(real_pw * h_lj_6term, real_pw * h_lj_12term, int n_lj_types, int * h_nb15off, const int in_max_n_nb15off) { // printf("threads : %d\n", PW_THREADS); printf("cuda_alloc_set_lj_params\n"); const unsigned int size_lj_matrix = sizeof(real_pw) * n_lj_types * n_lj_types; // hipMalloc HANDLE_ERROR(hipMalloc((void **)&d_lj_6term, size_lj_matrix)); HANDLE_ERROR(hipMalloc((void **)&d_lj_12term, size_lj_matrix)); max_n_nb15off = in_max_n_nb15off; HANDLE_ERROR(hipMemcpyToSymbol(D_MAX_N_NB15OFF, &max_n_nb15off, sizeof(int))); const unsigned int size_nb15off_orig = sizeof(int) * max_n_nb15off * max_n_atoms_exbox; HANDLE_ERROR(hipMalloc((void **)&d_nb15off_orig, size_nb15off_orig)); size_nb15off = max_n_nb15off * max_n_atom_array; printf("dbg0414 size_nb15off : %d\n",size_nb15off); HANDLE_ERROR(hipMalloc((void **)&d_nb15off, sizeof(int) * size_nb15off)); // hipBindTexture2D /* hipChannelFormatDesc desc = hipCreateChannelDesc<real>(); HANDLE_ERROR( hipBindTexture2D( NULL, tex_lj_6term, d_lj_6term, desc, n_lj_types, n_lj_types, sizeof(real)*n_lj_types) ); HANDLE_ERROR( hipBindTexture2D( NULL, tex_lj_12term, d_lj_12term, desc, n_lj_types, n_lj_types, sizeof(real)*n_lj_types) ); */ // cudaMempcpy HANDLE_ERROR(hipMemcpy(d_lj_6term, h_lj_6term, size_lj_matrix, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_lj_12term, h_lj_12term, size_lj_matrix, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_nb15off_orig, h_nb15off, size_nb15off_orig, hipMemcpyHostToDevice)); return 0; } extern "C" int cuda_free_lj_params() { // printf("cuda_free_lj_param\n"); // hipUnbindTexture(tex_lj_6term); // hipUnbindTexture(tex_lj_12term); HANDLE_ERROR(hipFree(d_lj_6term)); HANDLE_ERROR(hipFree(d_lj_12term)); HANDLE_ERROR(hipFree(d_nb15off_orig)); 
HANDLE_ERROR(hipFree(d_nb15off)); return 0; } // cuda_hostalloc_atom_type_charge extern "C" int cuda_hostalloc_atom_type_charge(int *&h_atom_type, real_pw *&h_charge, const int in_n_atoms_system) { n_atoms_system = in_n_atoms_system; HANDLE_ERROR(hipMemcpyToSymbol(D_N_ATOMS_SYSTEM, &in_n_atoms_system, sizeof(int))); printf("hostalloc atom_type_charge cu %d\n", in_n_atoms_system); HANDLE_ERROR(hipHostMalloc((void **)&h_atom_type, n_atoms_system * sizeof(int), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void **)&h_charge, n_atoms_system * sizeof(real_pw), hipHostMallocDefault)); return 0; } // cuda_hostalloc_atom_info // Allocation for MiniCell members extern "C" int cuda_hostalloc_atom_info(real_pw *& h_crd, int *& h_atomids, real_fc *& h_work, real_fc *& h_energy, int in_max_n_atom_array) { max_n_atom_array = in_max_n_atom_array; printf("hostalloc_atom_info %d\n", max_n_atom_array); HANDLE_ERROR(hipHostMalloc((void **)&h_crd, max_n_atom_array * 3 * sizeof(real_pw), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void **)&h_atomids, max_n_atom_array * sizeof(int), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void **)&h_work, max_n_atom_array * 3 * sizeof(real_fc), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void **)&h_energy, 2 * sizeof(real_fc), hipHostMallocDefault)); return 0; } extern "C" int cuda_hostalloc_cell_info(int *&h_idx_xy_head_cell, int n_columns) { printf("cuda_hostalloc_cell_info cu\n"); HANDLE_ERROR(hipHostMalloc((void **)&h_idx_xy_head_cell, (n_columns) * sizeof(int), hipHostMallocDefault)); return 0; } extern "C" int cuda_hostalloc_cellpair_info(CellPair *&h_cell_pairs, int *& h_idx_head_cell_pairs, int *& h_n_cells_z, int max_n_cell_pairs, int max_n_cells, int n_columns) { printf("cuda_hostalloc_cellpair_info cu\n"); HANDLE_ERROR(hipHostMalloc((void **)&h_cell_pairs, max_n_cell_pairs * sizeof(CellPair), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void **)&h_idx_head_cell_pairs, (max_n_cells) * sizeof(int), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void **)&h_n_cells_z, (n_columns) * sizeof(int), hipHostMallocDefault)); return 0; } extern "C" int cuda_hostfree_cellpair_info(CellPair *h_cell_pairs, int *h_idx_head_cell_pairs, int *&h_n_cells_z) { HANDLE_ERROR(hipHostFree(h_cell_pairs)); HANDLE_ERROR(hipHostFree(h_idx_head_cell_pairs)); HANDLE_ERROR(hipHostFree(h_n_cells_z)); } extern "C" int cuda_hostfree_atom_type_charge(int *h_atom_type, real_pw *h_charge) { HANDLE_ERROR(hipHostFree(h_atom_type)); HANDLE_ERROR(hipHostFree(h_charge)); return 0; } extern "C" int cuda_hostfree_atom_info(real_pw *h_crd, int *h_atomids, real_fc *&h_work, real_fc *&h_energy) { HANDLE_ERROR(hipHostFree(h_crd)); HANDLE_ERROR(hipHostFree(h_atomids)); HANDLE_ERROR(hipHostFree(h_work)); HANDLE_ERROR(hipHostFree(h_energy)); return 0; } extern "C" int cuda_hostfree_cell_info(int *h_idx_xy_head_cell) { HANDLE_ERROR(hipHostFree(h_idx_xy_head_cell)); return 0; } __global__ void kernel_set_nb15off(const int *d_atomids, const int *d_atomids_rev, const int *d_nb15off_orig, int * d_nb15off) { /*if(threadIdx.x != 0) return; for(int g_thread_idx=0; g_thread_idx/D_MAX_N_NB15OFF < D_N_ATOM_ARRAY; g_thread_idx++){ const int atomid = g_thread_idx / D_MAX_N_NB15OFF; const int idx = g_thread_idx % D_MAX_N_NB15OFF; if (d_atomids[atomid] < 0) { d_nb15off[g_thread_idx] = atomid; } else { const int orig = d_nb15off_orig[d_atomids[atomid] * D_MAX_N_NB15OFF + idx]; if (orig == -1) { d_nb15off[g_thread_idx] = -1; } else { d_nb15off[g_thread_idx] = d_atomids_rev[orig]; } } }*/ 
const int g_thread_idx = threadIdx.x + blockDim.x * blockIdx.x; const int atomid = g_thread_idx / D_MAX_N_NB15OFF; const int idx = g_thread_idx % D_MAX_N_NB15OFF; if (atomid >= D_N_ATOM_ARRAY) return; if (d_atomids[atomid] < 0) { d_nb15off[g_thread_idx] = atomid; } else { const int orig = d_nb15off_orig[d_atomids[atomid] * D_MAX_N_NB15OFF + idx]; if (orig == -1) { d_nb15off[g_thread_idx] = -1; } else { d_nb15off[g_thread_idx] = d_atomids_rev[orig]; } } } __global__ void kernel_set_atominfo(const int * d_atomids, const int * d_atomtype_orig, const real_pw *d_charge_orig, int * d_atomtype, real4 * d_crd_chg, int * d_atomids_rev) { //dbg0420 int atomid = threadIdx.x + blockIdx.x * blockDim.x; if (atomid < D_N_ATOM_ARRAY && d_atomids[atomid] >= 0) { //if(threadIdx.x != 0) return; //for(int atomid = 0 ; atomid < D_N_ATOM_ARRAY; atomid ++){ d_atomtype[atomid] = d_atomtype_orig[d_atomids[atomid]]; d_crd_chg[atomid].w = d_charge_orig[d_atomids[atomid]]; d_atomids_rev[d_atomids[atomid]] = atomid; } } __global__ void kernel_set_crd(const int *d_atomids, const real_pw *d_crd, real4 *d_crd_chg, real2 *d_cell_z) { //dbg0420 /*if(threadIdx.x != 0) return; for(int atomid=0; atomid < D_N_ATOM_ARRAY; atomid++){ int at_idx = atomid * 3; d_crd_chg[atomid].x = d_crd[at_idx]; d_crd_chg[atomid].y = d_crd[at_idx + 1]; d_crd_chg[atomid].z = d_crd[at_idx + 2]; }*/ int atomid = threadIdx.x + blockIdx.x * blockDim.x; if (atomid < D_N_ATOM_ARRAY) { int at_idx = atomid * 3; d_crd_chg[atomid].x = d_crd[at_idx]; d_crd_chg[atomid].y = d_crd[at_idx + 1]; d_crd_chg[atomid].z = d_crd[at_idx + 2]; /*if(atomid % N_ATOM_CELL == 0){ d_cell_z[atomid/N_ATOM_CELL].x = d_crd[at_idx+2]; }else if(atomid % N_ATOM_CELL == N_ATOM_CELL-1){ d_cell_z[atomid/N_ATOM_CELL].y = d_crd[at_idx+2]; }*/ } } extern "C" int cuda_set_atominfo() { HANDLE_ERROR(hipMemset(d_atomtype, -1, sizeof(int) * max_n_atom_array)); HANDLE_ERROR(hipMemset(d_atomids_rev, -1, sizeof(int) * max_n_atoms_exbox)); HANDLE_ERROR(hipMemset(d_nb15off, -1, sizeof(int) * size_nb15off)); HANDLE_ERROR(hipMemset(d_n_cell_pairs, 0, sizeof(int) * max_n_cells)); // HANDLE_ERROR( hipMemset(d_cell_pair_removed, 0, sizeof(int)*max_n_cells )); int blocks1 = (n_atom_array + REORDER_THREADS - 1) / REORDER_THREADS; hipLaunchKernelGGL(( kernel_set_atominfo), dim3(blocks1), dim3(REORDER_THREADS), 0, 0, d_atomids, d_atomtype_orig, d_charge_orig, d_atomtype, d_crd_chg, d_atomids_rev); hipDeviceSynchronize(); //kernel_set_atominfo<<<1, REORDER_THREADS>>>(d_atomids, d_atomtype_orig, //d_charge_orig, d_atomtype, //d_crd_chg, d_atomids_rev); int blocks2 = (n_atom_array * max_n_nb15off + REORDER_THREADS - 1) / REORDER_THREADS; //dbg0420 hipLaunchKernelGGL(( kernel_set_nb15off), dim3(blocks2), dim3(REORDER_THREADS), 0, 0, d_atomids, d_atomids_rev, d_nb15off_orig, d_nb15off); //kernel_set_nb15off<<<1, REORDER_THREADS>>>(d_atomids, d_atomids_rev, d_nb15off_orig, d_nb15off); hipDeviceSynchronize(); return 0; } extern "C" int cuda_set_crd() { int blocks = (n_atom_array + REORDER_THREADS - 1) / REORDER_THREADS; //dbg0420 hipLaunchKernelGGL(( kernel_set_crd), dim3(blocks), dim3(REORDER_THREADS), 0, 0, d_atomids, d_crd, d_crd_chg, d_cell_z); hipDeviceSynchronize(); //kernel_set_crd<<<1, REORDER_THREADS>>>(d_atomids, d_crd, d_crd_chg, d_cell_z); return 0; } __global__ void kernel_set_work_orig(real_fc *d_work, const int *d_atomids) { int atomid = threadIdx.x + blockIdx.x * blockDim.x; if (atomid < D_N_ATOM_ARRAY) { // && d_atomids[atomid] >= 0){ int index_orig = atomid * 3; for (int n = 1; n < N_MULTI_WORK; 
n++) { int index = (atomid + D_N_ATOM_ARRAY * n) * 3; d_work[index_orig + 0] += d_work[index + 0]; d_work[index_orig + 1] += d_work[index + 1]; d_work[index_orig + 2] += d_work[index + 2]; } } } __global__ void kernel_reduction_energy(real_fc *d_energy) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = 1; i < N_MULTI_WORK; i++) { if (tid == 1) { d_energy[0] += d_energy[i * 2]; d_energy[1] += d_energy[i * 2 + 1]; // printf("ene %f %f\n",d_energy[0],d_energy[1]); } } } __device__ bool check_15off64(const int atom_idx1, const int atom_idx2, const int *bitmask, int & mask_id, int & interact_bit) { int bit_pos = atom_idx2 * N_ATOM_CELL + atom_idx1; mask_id = bit_pos / 32; interact_bit = 1 << (bit_pos % 32); return (bitmask[mask_id] & interact_bit) == interact_bit; } __device__ real_pw check_15off(const int atomid1, const int atomid2, const int tmask_a1, const int tmask_a2) { int aid_diff = atomid2 - atomid1; int target = tmask_a1; if (aid_diff < 0) { aid_diff = -aid_diff; target = tmask_a2; } int mask = 0; if (aid_diff <= 32) mask = 1 << (aid_diff - 1); real_pw valid_pair = 1.0; if (mask != 0 && (mask & target) == mask) valid_pair = 0.0; return valid_pair; } __device__ real_pw cal_pair(real_pw & w1, real_pw & w2, real_pw & w3, real_pw & ene_vdw, real_pw & ene_ele, const real4 &crd_chg1, const real4 &crd_chg2, const int & atomtype1, const int & atomtype2, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term) { const real_pw d12[3] = {crd_chg1.x - crd_chg2.x, crd_chg1.y - crd_chg2.y, crd_chg1.z - crd_chg2.z}; const real_pw r12_2 = d12[0] * d12[0] + d12[1] * d12[1] + d12[2] * d12[2]; const real_pw r12 = sqrt(r12_2); if (r12 >= D_CUTOFF) { return r12; } const real_pw r12_inv = 1.0 / r12; const real_pw r12_2_inv = r12_inv * r12_inv; const real_pw r12_3_inv = r12_inv * r12_2_inv; const real_pw r12_6_inv = r12_3_inv * r12_3_inv; const real_pw r12_12_inv = r12_6_inv * r12_6_inv; const real_pw term6 = d_lj_6term[atomtype1 * D_N_ATOMTYPES + atomtype2] * r12_6_inv; const real_pw term12 = d_lj_12term[atomtype1 * D_N_ATOMTYPES + atomtype2] * r12_12_inv; real_pw work_coef = r12_2_inv * (-12.0 * term12 + 6.0 * term6); const real_pw cc = crd_chg1.w * crd_chg2.w * D_CHARGE_COEFF; work_coef -= cc * (r12_3_inv - D_FCOEFF); w1 = (work_coef)*d12[0]; w2 = (work_coef)*d12[1]; w3 = (work_coef)*d12[2]; ene_ele = cc * (r12_inv - D_ZCORE + D_BCOEFF * r12_2); ene_vdw = (-term6 + term12); return r12; } __global__ void kernel_pairwise_ljzd(const real4 *d_crd_chg, CellPair * d_cell_pairs, const int * d_idx_head_cell_pairs, const int * d_atomtype, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term, real_fc *d_energy, real_fc *d_work) { // const bool flg_mod_15mask){ real_fc ene_vdw = 0.0; real_fc ene_ele = 0.0; const int global_threadIdx = blockDim.x * blockIdx.x + threadIdx.x; const int c1 = global_threadIdx >> 5; const int warpIdx = threadIdx.x >> 5; if (c1 >= D_N_CELLS) { return; } const int laneIdx = global_threadIdx & 31; const int n_loops = (d_idx_head_cell_pairs[c1 + 1] - d_idx_head_cell_pairs[c1]) * 2; const int ene_index_offset = global_threadIdx % N_MULTI_WORK; real_fc work_c1[3] = {0.0, 0.0, 0.0}; const int atom_idx1 = (laneIdx & 7); // laneIdx%8 const int a1 = c1 * N_ATOM_CELL + atom_idx1; __shared__ real4 crd_chg1[N_ATOM_CELL * (PW_THREADS >> 5)]; __shared__ int atomtype1[N_ATOM_CELL * (PW_THREADS >> 5)]; const int sharedmem_idx = N_ATOM_CELL * warpIdx + atom_idx1; if (laneIdx < N_ATOM_CELL) { crd_chg1[sharedmem_idx] = d_crd_chg[c1 * 
N_ATOM_CELL + laneIdx]; atomtype1[sharedmem_idx] = d_atomtype[c1 * N_ATOM_CELL + laneIdx]; } __syncthreads(); CellPair cellpair; int cp; for (int loopIdx = 0; loopIdx < n_loops; loopIdx++) { if (loopIdx % 2 == 0) { if (laneIdx == 0) { cp = d_idx_head_cell_pairs[c1] + (loopIdx >> 1); if (cp >= D_N_CELL_PAIRS) break; cellpair = d_cell_pairs[cp]; } cp = __shfl(cp, 0); cellpair.cell_id1 = __shfl(cellpair.cell_id1, 0); cellpair.cell_id2 = __shfl(cellpair.cell_id2, 0); cellpair.image = __shfl(cellpair.image, 0); cellpair.pair_mask[0] = __shfl(cellpair.pair_mask[0], 0); cellpair.pair_mask[1] = __shfl(cellpair.pair_mask[1], 0); } if (cellpair.cell_id1 != c1) break; const int c2 = cellpair.cell_id2; // atom_idx ... index in cell, 0-7 const int atom_idx2 = (laneIdx >> 3) + 4 * (loopIdx % 2); // laneIdx/8 + 4*(warpIdx%2) // remove 1-2, 1-3, 1-4 pairs const int a2 = c2 * N_ATOM_CELL + atom_idx2; real4 crd_chg2; int atomtype2; if (atom_idx1 == 0) { crd_chg2 = d_crd_chg[a2]; atomtype2 = d_atomtype[a2]; if ((cellpair.image & 1) == 1) crd_chg2.x -= PBC_L[0]; else if ((cellpair.image & 2) == 2) crd_chg2.x += PBC_L[0]; if ((cellpair.image & 4) == 4) crd_chg2.y -= PBC_L[1]; else if ((cellpair.image & 8) == 8) crd_chg2.y += PBC_L[1]; if ((cellpair.image & 16) == 16) crd_chg2.z -= PBC_L[2]; else if ((cellpair.image & 32) == 32) crd_chg2.z += PBC_L[2]; } int atomid2_top = laneIdx - laneIdx % 8; crd_chg2.x = __shfl(crd_chg2.x, laneIdx - atom_idx1); crd_chg2.y = __shfl(crd_chg2.y, laneIdx - atom_idx1); crd_chg2.z = __shfl(crd_chg2.z, laneIdx - atom_idx1); crd_chg2.w = __shfl(crd_chg2.w, laneIdx - atom_idx1); atomtype2 = __shfl(atomtype2, laneIdx - atom_idx1); real_pw w1 = 0.0, w2 = 0.0, w3 = 0.0; real_pw cur_ene_ele = 0.0; real_pw cur_ene_vdw = 0.0; int mask_id; int interact_bit; if (!check_15off64(atom_idx1, atom_idx2, cellpair.pair_mask, mask_id, interact_bit)) { real_pw r12 = cal_pair(w1, w2, w3, cur_ene_vdw, cur_ene_ele, // d_crd_chg[a1], crd_chg1[sharedmem_idx], crd_chg2, // d_atomtype[a1], atomtype1[sharedmem_idx], atomtype2, d_lj_6term, d_lj_12term); // if(flg_mod_15mask && r12 < D_CUTOFF_PAIRLIST) interact_bit = 0; ene_vdw += cur_ene_vdw; ene_ele += cur_ene_ele; work_c1[0] += w1; work_c1[1] += w2; work_c1[2] += w3; } /*if(flg_mod_15mask){ for(int i = 32; i >= 1; i/=2){ interact_bit |= __shfl_xor(interact_bit, i); } if(laneIdx == 0) d_cell_pairs[cp].pair_mask[mask_id] |= interact_bit; }*/ for (int i = 4; i >= 1; i /= 2) { w1 += shfl_xor(w1, i, 8); w2 += shfl_xor(w2, i, 8); w3 += shfl_xor(w3, i, 8); } if (laneIdx % 8 == 0) { // && (w1 != 0.0 || w2 != 0.0 || w3 != 0.0)){ const int tmp_index = (((global_threadIdx / WARPSIZE) % N_MULTI_WORK) * D_N_ATOM_ARRAY + a2) * 3; atomicAdd2(&(d_work[tmp_index + 0]), -w1); atomicAdd2(&(d_work[tmp_index + 1]), -w2); atomicAdd2(&(d_work[tmp_index + 2]), -w3); } } for (int i = 16; i >= 8; i /= 2) { work_c1[0] += shfl_xor(work_c1[0], i, 32); work_c1[1] += shfl_xor(work_c1[1], i, 32); work_c1[2] += shfl_xor(work_c1[2], i, 32); } if (laneIdx < 8) { const int tmp_index = ((ene_index_offset * D_N_ATOM_ARRAY) + a1) * 3; atomicAdd2(&(d_work[tmp_index + 0]), work_c1[0]); atomicAdd2(&(d_work[tmp_index + 1]), work_c1[1]); atomicAdd2(&(d_work[tmp_index + 2]), work_c1[2]); } for (int i = 16; i >= 1; i /= 2) { ene_vdw += shfl_xor(ene_vdw, i, 32); ene_ele += shfl_xor(ene_ele, i, 32); } if (laneIdx == 0) { const int tmp_index = ((global_threadIdx / 32) % N_MULTI_WORK) * 2; atomicAdd2(&(d_energy[tmp_index + 0]), ene_vdw); atomicAdd2(&(d_energy[tmp_index + 1]), ene_ele); } } __global__ void 
set_idx_head_cell_pairs(const int *d_n_cell_pairs, int *d_idx_head_cell_pairs) { // n_rep: the number of cells to be processed in each thread. /* dbg0415 int n_rep = (D_N_CELLS + REORDER_THREADS - 1) / REORDER_THREADS; for (int i = 0; i < n_rep; i++) { const int idx_head = REORDER_THREADS * i; const int idx_write = idx_head + threadIdx.x; if (idx_write < D_N_CELLS) { if (idx_write > 0) { const int idx = ((d_n_cell_pairs[idx_write - 1] + CP_PER_THREAD - 1) / CP_PER_THREAD) * CP_PER_THREAD; d_idx_head_cell_pairs[idx_write] = idx; } else { d_idx_head_cell_pairs[idx_write] = 0; } } for (int j = 1; j < REORDER_THREADS; j *= 2) { const int idx = (threadIdx.x / j); if (idx_write < D_N_CELLS && idx % 2 == 1) { d_idx_head_cell_pairs[idx_write] += d_idx_head_cell_pairs[idx_head + idx * j - 1]; } __syncthreads(); } if (i > 0) { d_idx_head_cell_pairs[idx_write] += d_idx_head_cell_pairs[idx_head - 1]; } __syncthreads(); } if (threadIdx.x == 0) { const int idx = ((d_n_cell_pairs[D_N_CELLS - 1] + CP_PER_THREAD - 1) / CP_PER_THREAD) * CP_PER_THREAD; d_idx_head_cell_pairs[D_N_CELLS] = d_idx_head_cell_pairs[D_N_CELLS - 1] + idx; } */ if (threadIdx.x == 0) { //const int idx = ((d_n_cell_pairs[D_N_CELLS - 1] + CP_PER_THREAD - 1) / CP_PER_THREAD) * CP_PER_THREAD; //d_idx_head_cell_pairs[D_N_CELLS] = d_idx_head_cell_pairs[D_N_CELLS - 1] + idx; d_idx_head_cell_pairs[0] = 0; for(int i=1; i <= D_N_CELLS; i++){ d_idx_head_cell_pairs[i] = d_idx_head_cell_pairs[i-1] + d_n_cell_pairs[i-1]; //printf("n:%d head:%d\n", //d_n_cell_pairs[i] , d_idx_head_cell_pairs[i]); } } // printf("max cp: %d\n",idx_cp); } __global__ void pack_cellpairs_array(CellPair * d_cell_pairs, const CellPair *d_cell_pairs_buf, const int * d_n_cell_pairs, const int * d_idx_head_cell_pairs) { const int cp = blockDim.x * blockIdx.x + threadIdx.x; if (cp >= D_MAX_N_CELL_PAIRS) return; //if (cp >= D_N_CELL_PAIRS) return; const CellPair cellpair = d_cell_pairs_buf[cp]; //printf("dbg0414b cp:%d %d %d\n",cp,cellpair.cell_id1,cellpair.cell_id2); if (cellpair.cell_id1 < 0 || cellpair.cell_id2 < 0 || cellpair.cell_id1 >= D_N_CELLS || cellpair.cell_id2 >= D_N_CELLS) { return; } const int cp_in_cell1 = cp - cellpair.cell_id1 * D_MAX_N_CELL_PAIRS_PER_CELL; if (cp_in_cell1 >= d_n_cell_pairs[cellpair.cell_id1]) { printf("Error: cp_in_cell1:%d d_n_cell_pairs:%d cp:%d c1:%d head:%d\n", cp_in_cell1, d_n_cell_pairs[cellpair.cell_id1], cp, cellpair.cell_id1, d_idx_head_cell_pairs[cellpair.cell_id1]); return; } const int dest = d_idx_head_cell_pairs[cellpair.cell_id1] + cp_in_cell1; if (dest < cellpair.cell_id1 || dest >= D_MAX_N_CELL_PAIRS){ printf("!!?? 
dest: %d (%d-%d) cp_in_cell:%d head:%d\n", dest, cellpair.cell_id1, cellpair.cell_id2, cp_in_cell1, d_idx_head_cell_pairs[cellpair.cell_id1]); } d_cell_pairs[dest] = cellpair; // if(cellpair.cell_id1 == 2 || cellpair.cell_id1 ==3 ){ // printf("dbg0414kernelpack cp:%d c1:%d c2:%d incell:%d dest:%d head:%d\n", cp, cellpair.cell_id1,cellpair.cell_id2, cp_in_cell1, dest, d_idx_head_cell_pairs[cellpair.cell_id1]); // } } __global__ void kernel_reset_cellpairs(CellPair *d_cell_pairs, int *d_n_cell_pairs, const int n_cells) { const int cell1_id = blockDim.x * blockIdx.x + threadIdx.x; if (cell1_id >= n_cells) { return; } int n_cp1 = d_n_cell_pairs[cell1_id]; for (int cell2 = 0; cell2 < n_cp1; cell2++) { bool flg = true; int n_mask_int = (N_ATOM_CELL * N_ATOM_CELL + 31) / 32; const int cp = D_MAX_N_CELL_PAIRS_PER_CELL * cell1_id + cell2; for (int i = 0; i < n_mask_int; i++) flg &= (d_cell_pairs[cp].pair_mask[i] == ~0); if (flg) { d_n_cell_pairs[cell1_id]--; int cp_src = D_MAX_N_CELL_PAIRS_PER_CELL * cell1_id + --n_cp1; d_cell_pairs[cp] = d_cell_pairs[cp_src]; } } d_n_cell_pairs[cell1_id] = n_cp1; } extern "C" int cuda_pairwise_ljzd(const bool flg_mod_15mask) { HANDLE_ERROR(hipMemset(d_energy, 0.0, sizeof(real_fc) * 2 * N_MULTI_WORK)); HANDLE_ERROR(hipMemset(d_work, 0.0, sizeof(real_fc) * max_n_atom_array * 3 * N_MULTI_WORK)); hipStreamCreate(&stream_pair_home); const int blocks = (n_cells + PW_THREADS / 32 - 1) / (PW_THREADS / 32); hipLaunchKernelGGL(( kernel_pairwise_ljzd), dim3(blocks), dim3(PW_THREADS), 0, stream_pair_home, d_crd_chg, d_cell_pairs, d_idx_head_cell_pairs, d_atomtype, d_lj_6term, d_lj_12term, d_energy, d_work); // if(flg_mod_15mask){ // const int blocks2 = (n_cal_cells+PW_THREADS-1) / PW_THREADS; // kernel_reset_cellpairs<<<blocks2, PW_THREADS, 0, stream_pair_home>>> //(d_cell_pairs, d_n_cell_pairs, n_cal_cells); //} return 0; } extern "C" int cuda_thread_sync() { hipDeviceSynchronize(); return 0; } extern "C" int cuda_pair_sync() { hipStreamSynchronize(stream_pair_home); hipStreamDestroy(stream_pair_home); return 0; } extern "C" int cuda_memcpy_dtoh_work(real_fc *&h_work, real_fc *&h_energy, int n_atoms, int n_atom_array) { // printf("! 
cuda_memcpy_dtoh_work\n"); int blocks = (n_atom_array + REORDER_THREADS - 1) / REORDER_THREADS; // printf("kernel_set_work_orig\n"); if(N_MULTI_WORK > 1) { hipStream_t stream_reduction1; hipStream_t stream_reduction2; hipStreamCreate(&stream_reduction1); hipStreamCreate(&stream_reduction2); hipLaunchKernelGGL(( kernel_set_work_orig), dim3(blocks), dim3(REORDER_THREADS), 0, stream_reduction1, d_work, d_atomids); hipLaunchKernelGGL(( kernel_reduction_energy), dim3(1), dim3(REORDER_THREADS), 0, stream_reduction2, d_energy); hipStreamSynchronize(stream_reduction1); hipStreamSynchronize(stream_reduction2); hipStreamDestroy(stream_reduction1); hipStreamDestroy(stream_reduction2); } HANDLE_ERROR(hipMemcpy(h_work, d_work, sizeof(real_fc) * n_atom_array * 3, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipMemcpy(h_energy, d_energy, sizeof(real_fc) * 2, hipMemcpyDeviceToHost)); //printf("cuda ene %f %f\n",h_energy[0], h_energy[1]); return 0; } int cuda_reset_work_ene() { HANDLE_ERROR(hipMemset(d_work, 0.0, sizeof(real_fc) * max_n_atom_array * 3 * N_MULTI_WORK)); HANDLE_ERROR(hipMemset(d_energy, 0.0, sizeof(real_fc) * 2 * N_MULTI_WORK)); return 0; } __device__ int get_column_id_from_crd(const int x, const int y) { return y * D_N_CELLS_XYZ[0] + x; } __device__ bool check_valid_pair(const int cell1_id, const int cell2_id) { const bool cell1_odd = cell1_id % 2 != 0; const bool cell2_odd = cell2_id % 2 != 0; if (cell1_odd) { if ((cell2_id < cell1_id && !cell2_odd) || (cell2_id > cell1_id && cell2_odd)) return false; } else { if ((cell2_id < cell1_id && cell2_odd) || (cell2_id > cell1_id && !cell2_odd)) return false; } return true; } __device__ int set_cell_pair_bitmask(const int cell_id1, const int cell_id2, const int cell1_id_in_block, const int *s_nb15off, const int n_atom_cell2, int * pair_mask) { for (int i = 0; i < N_BITMASK; i++) pair_mask[i] = 0; int a1 = N_ATOM_CELL * cell_id1; for (int a1_cell = 0; a1_cell < N_ATOM_CELL; a1++, a1_cell++) { bool flg1 = false; if (s_nb15off[(cell1_id_in_block * N_ATOM_CELL + a1_cell) * D_MAX_N_NB15OFF] == a1) flg1 = true; int a2 = N_ATOM_CELL * cell_id2; for (int a2_cell = 0; a2_cell < N_ATOM_CELL; a2++, a2_cell++) { const int bit_pos = a2_cell * N_ATOM_CELL + a1_cell; const int mask_id = bit_pos / 32; const int mask_pos = bit_pos % 32; const int add_bit = 1 << mask_pos; bool flg12 = false; if (flg1) flg12 = true; if (a2_cell >= n_atom_cell2) flg12 = true; else if ((cell_id1 == cell_id2 && a1 >= a2)) flg12 = true; else { const int tail = (cell1_id_in_block * N_ATOM_CELL + a1_cell + 1) * D_MAX_N_NB15OFF; for (int i = tail - D_MAX_N_NB15OFF; i < tail && s_nb15off[i] != -1; i++) { if (s_nb15off[i] == a2) { flg12 = true; break; } } } if (flg12) { pair_mask[mask_id] |= add_bit; } } } return 0; } __device__ CellPair get_new_cell_pair(const int cell1_id, const int cell2_id, const int image[3]) { CellPair new_cp; new_cp.cell_id1 = cell1_id; new_cp.cell_id2 = cell2_id; int bit_image = 0; if (image[0] == -1) bit_image = bit_image | 1; else if (image[0] == 1) bit_image = bit_image | 2; if (image[1] == -1) bit_image = bit_image | 4; else if (image[1] == 1) bit_image = bit_image | 8; if (image[2] == -1) bit_image = bit_image | 16; else if (image[2] == 1) bit_image = bit_image | 32; new_cp.image = bit_image; new_cp.pair_mask[0] = ~0; new_cp.pair_mask[1] = ~0; return new_cp; } __global__ void kernel_enumerate_cell_pair(const real4 *d_crd_chg, const real2 *d_cell_z, const int * d_idx_xy_head_cell, const int * d_idx_cell_column, const int * d_atomids, const int * d_nb15off_orig, const 
int * d_nb15off, int * d_n_cell_pairs, CellPair * d_cell_pairs) { // 1 warp calculates pairs with a cell const int g_thread_id = (threadIdx.x + blockIdx.x * blockDim.x); const int cell1_id = g_thread_id / D_N_NEIGHBOR_COL; if (cell1_id >= D_N_CELLS) return; const int neighbor_col_id = g_thread_id % D_N_NEIGHBOR_COL; int cell1_crd[3]; const int col1_id = d_idx_cell_column[cell1_id]; cell1_crd[0] = col1_id % D_N_CELLS_XYZ[0]; cell1_crd[1] = col1_id / D_N_CELLS_XYZ[0]; cell1_crd[2] = cell1_id - d_idx_xy_head_cell[col1_id]; const real4 crd_chg11 = d_crd_chg[cell1_id * N_ATOM_CELL]; const real_pw cell1_z_bottom = crd_chg11.z; // const real_pw cell1_z_top = d_cell_z[cell1_id].y; const real_pw cell1_z_top = d_crd_chg[(cell1_id + 1) * N_ATOM_CELL - 1].z; int image[3] = {0, 0, 0}; const int idx_cell_pair_head = D_MAX_N_CELL_PAIRS_PER_CELL * cell1_id; int d_cell[3]; d_cell[0] = neighbor_col_id % (D_N_NEIGHBOR_XYZ[0] * 2 + 1) - D_N_NEIGHBOR_XYZ[0]; d_cell[1] = neighbor_col_id / (D_N_NEIGHBOR_XYZ[0] * 2 + 1) - D_N_NEIGHBOR_XYZ[1]; int rel_x[3]; int cell2_crd[3]; real_pw dx[3] = {0.0, 0.0, 0.0}; for (int d = 0; d < 2; d++) { image[d] = 0; rel_x[d] = cell1_crd[d] + d_cell[d]; cell2_crd[d] = rel_x[d]; if (rel_x[d] < 0) { image[d] = -1; cell2_crd[d] = D_N_CELLS_XYZ[d] + rel_x[d]; } else if (rel_x[d] >= D_N_CELLS_XYZ[d]) { image[d] = 1; cell2_crd[d] = rel_x[d] - D_N_CELLS_XYZ[d]; } if (d_cell[d] > 0) dx[d] = (d_cell[d] - 1) * D_L_CELL_XYZ[d]; else if (d_cell[d] < 0) dx[d] = (d_cell[d] + 1) * D_L_CELL_XYZ[d]; dx[d] = dx[d] * dx[d]; } /* 20150612-- */ const int col2_id = cell2_crd[0] + cell2_crd[1] * D_N_CELLS_XYZ[0]; const int n_cells_in_col2 = d_idx_xy_head_cell[col2_id + 1] - d_idx_xy_head_cell[col2_id]; int inc = 1; bool flg_up = true; bool flg_down = true; for (d_cell[2] = 0; flg_up || flg_down; d_cell[2] += inc) { rel_x[2] = cell1_crd[2] + d_cell[2]; image[2] = 0; cell2_crd[2] = rel_x[2]; if (rel_x[2] < 0) { image[2] = -1; cell2_crd[2] += n_cells_in_col2; } else if (rel_x[2] >= n_cells_in_col2) { image[2] = 1; cell2_crd[2] -= n_cells_in_col2; } const int cell2_id = d_idx_xy_head_cell[col2_id] + cell2_crd[2]; // const real_pw cell2_z_bottom = d_cell_z[cell2_id].x + image[2] * PBC_L[2]; // const real_pw cell2_z_top = d_cell_z[cell2_id].y + image[2] * PBC_L[2]; const real_pw cell2_z_bottom = d_crd_chg[cell2_id * N_ATOM_CELL].z + image[2] * PBC_L[2]; const real_pw cell2_z_top = d_crd_chg[(cell2_id + 1) * N_ATOM_CELL - 1].z + image[2] * PBC_L[2]; if (cell2_z_top < cell1_z_bottom) { dx[2] = cell1_z_bottom - cell2_z_top; dx[2] = dx[2] * dx[2]; if (inc == -1 && dx[0] + dx[1] + dx[2] > D_CUTOFF_PAIRLIST_2) { flg_down = false; } } else if (cell2_z_bottom > cell1_z_top) { dx[2] = cell2_z_bottom - cell1_z_top; dx[2] = dx[2] * dx[2]; if (inc == 1 && dx[0] + dx[1] + dx[2] > D_CUTOFF_PAIRLIST_2) { d_cell[2] = 0; inc = -1; flg_up = false; } } else { dx[2] = 0.0; } if (dx[0] + dx[1] + dx[2] < D_CUTOFF_PAIRLIST_2) { if (check_valid_pair(cell1_id, cell2_id)) { const int cp_idx_cell = atomicAdd(&d_n_cell_pairs[cell1_id], 1); if (cp_idx_cell >= D_MAX_N_CELL_PAIRS_PER_CELL) { printf("Index exceeds the maximum value. 
%d / %d\n", cp_idx_cell, D_MAX_N_CELL_PAIRS_PER_CELL); } d_cell_pairs[idx_cell_pair_head + cp_idx_cell] = get_new_cell_pair(cell1_id, cell2_id, image); } } } } __global__ void kernel_init_cell_pairs(CellPair *d_cell_pairs, CellPair *d_cell_pairs_buf) { const int cp_id = threadIdx.x + blockDim.x * blockIdx.x; if (cp_id >= D_MAX_N_CELL_PAIRS) return; CellPair new_cp; new_cp.cell_id1 = -1; new_cp.cell_id2 = -1; new_cp.image = 0; for (int i = 0; i < N_BITMASK; i++) { new_cp.pair_mask[i] = ~0; } d_cell_pairs[cp_id] = new_cp; d_cell_pairs_buf[cp_id] = new_cp; } __global__ void set_cell_pairs_nb15off(const int *d_idx_head_cell_pairs, const int *d_nb15off, const int *d_atomids, const int n_cell_pairs, CellPair * d_cell_pairs) { const int cp_block_first = blockIdx.x * blockDim.x; const int cp = threadIdx.x + cp_block_first; if (cp >= n_cell_pairs) return; //printf("dbg0415a cp : %d cp_block_first : %d blockIdx: %d blockDim: %d threadIdx : %d\n",cp, cp_block_first,blockIdx.x, blockDim.x, threadIdx.x); const int cell1_id = d_cell_pairs[cp].cell_id1; const int cell2_id = d_cell_pairs[cp].cell_id2; if (cell1_id < 0 || cell2_id < 0) return; const int cell1_id_in_block = cell1_id - d_cell_pairs[cp_block_first].cell_id1; //if(cell1_id_in_block < 0){ //printf("dbg0415a2 %d %d %d %d t:%d bid:%d bdim:%d\n",cell1_id_in_block, cell1_id, cp_block_first, d_cell_pairs[cp_block_first].cell_id1, threadIdx.x, blockIdx.x, blockDim.x); //} if(cp_block_first >= n_cell_pairs){ printf("ERROR! cp_block_first >= n_cell_pairs: \n %d %d\n",cp_block_first, n_cell_pairs); } //printf("val1: %d val2: %d\n", cell1_id * N_ATOM_CELL * MAX_N_NB15OFF + i, cp_block_first) if (cell1_id_in_block >= MAX_N_CELL_BLOCK) { printf("The number of cells in each block exceeds the constant MAX_N_CELL_BLOCK: %d / %d\ncell: %d %d - %d " "cp_block_first:%d " "c1_in_first:%d\n", cell1_id_in_block, MAX_N_CELL_BLOCK, cp, cell1_id, cell2_id, cp_block_first, d_cell_pairs[cp_block_first].cell_id1); } __shared__ int s_nb15off[MAX_N_CELL_BLOCK * N_ATOM_CELL * MAX_N_NB15OFF]; //printf("dbg0415b %d %d %d %d %d %d\n", threadIdx.x, cp, cell1_id, //cell1_id_in_block * N_ATOM_CELL * MAX_N_NB15OFF, //MAX_N_CELL_BLOCK * N_ATOM_CELL * MAX_N_NB15OFF, //cell1_id * N_ATOM_CELL * MAX_N_NB15OFF); if (threadIdx.x == 0 || d_cell_pairs[cp - 1].cell_id1 != cell1_id) { for (int i = 0; i < N_ATOM_CELL * MAX_N_NB15OFF; i++) { s_nb15off[cell1_id_in_block * N_ATOM_CELL * MAX_N_NB15OFF + i] = d_nb15off[cell1_id * N_ATOM_CELL * MAX_N_NB15OFF + i]; } //printf("dbg0415c d_nb15off %d %d\n",cell1_id * N_ATOM_CELL * MAX_N_NB15OFF, //cell1_id * N_ATOM_CELL * MAX_N_NB15OFF + N_ATOM_CELL*MAX_N_NB15OFF-1); } __syncthreads(); int n_atom_cell2 = 0; const int tail = (cell2_id + 1) * N_ATOM_CELL; for (int at2 = tail - N_ATOM_CELL; at2 < tail; at2++) { if (d_atomids[at2] >= 0) n_atom_cell2++; else break; } set_cell_pair_bitmask(cell1_id, cell2_id, cell1_id_in_block, s_nb15off, n_atom_cell2, d_cell_pairs[cp].pair_mask); } int set_idx_cell_column(const int *idx_atom_cell_xy, const int n_cells, const int *h_atomids) { for (int cell_id = 0; cell_id < n_cells; cell_id++) { h_idx_cell_column[cell_id] = idx_atom_cell_xy[cell_id * N_ATOM_CELL]; // printf("cell: %d col: %d atid: %d \n", // cell_id, h_idx_cell_column[cell_id], // h_atomids[cell_id*N_ATOM_CELL]); } return 0; } extern "C" int cuda_enumerate_cell_pairs(int *& h_atomids, const int n_cells, // const int n_uni, const int n_neighbor_col, const int *idx_atom_cell_xy) { // printf("dbg0413 cuda_enumerate_cell_pairs_01\n"); // cudaMemoryTest(); 
set_idx_cell_column(idx_atom_cell_xy, n_cells, h_atomids); // printf("dbg0413 cuda_enumerate_cell_pairs_02\n"); // cudaMemoryTest(); HANDLE_ERROR(hipMemcpy(d_idx_cell_column, h_idx_cell_column, n_cells * sizeof(int), hipMemcpyHostToDevice)); // printf("dbg0413 cuda_enumerate_cell_pairs_03\n"); // cudaMemoryTest(); //hipStream_t stream1; // hipStream_t stream2; //hipStreamCreate(&stream1); // hipStreamCreate(&stream2); // HANDLE_ERROR( hipMemset(d_cell_pairs, -1, sizeof(int)*5*D_MAX_N_CELL_PAIRS)); // HANDLE_ERROR( hipMemset(d_cell_pairs_buf, -1, sizeof(int)*5*D_MAX_N_CELL_PAIRS)); const int blocks3 = (max_n_cell_pairs + REORDER_THREADS - 1) / REORDER_THREADS; hipLaunchKernelGGL(( kernel_init_cell_pairs), dim3(blocks3), dim3(REORDER_THREADS), 0, 0, d_cell_pairs, d_cell_pairs_buf); hipDeviceSynchronize(); // printf("dbg0413 cuda_enumerate_cell_pairs_04\n"); //cudaMemoryTest(); const int blocks4 = (n_neighbor_col * n_cells + REORDER_THREADS - 1) / REORDER_THREADS; // printf("bbb %d\n", max_n_cell_pairs); hipLaunchKernelGGL(( kernel_enumerate_cell_pair), dim3(blocks4), dim3(REORDER_THREADS), 0, 0, // d_uni2cell_z, d_crd_chg, d_cell_z, d_idx_xy_head_cell, d_idx_cell_column, d_atomids, d_nb15off_orig, d_nb15off, d_n_cell_pairs, d_cell_pairs_buf); // d_cell_pairs); //printf("dbg0413 cuda_enumerate_cell_pairs_05\n"); // cudaMemoryTest(); hipDeviceSynchronize(); hipLaunchKernelGGL(( set_idx_head_cell_pairs), dim3(1), dim3(REORDER_THREADS), 0, 0, d_n_cell_pairs, d_idx_head_cell_pairs); hipDeviceSynchronize(); // n_cell_pairs = d_idx_head_cell_pairs[n_cells+1]; HANDLE_ERROR(hipMemcpy(&n_cell_pairs, &d_idx_head_cell_pairs[n_cells], sizeof(int), hipMemcpyDeviceToHost)); // printf("dbg0413 cuda_enumerate_cell_pairs_06\n"); //cudaMemoryTest(); HANDLE_ERROR(hipMemcpyToSymbol(D_N_CELL_PAIRS, &n_cell_pairs, sizeof(int))); //printf("dbg0413 cuda_enumerate_cell_pairs_07\n"); //cudaMemoryTest(); hipLaunchKernelGGL(( pack_cellpairs_array), dim3(blocks3), dim3(REORDER_THREADS), 0, 0, d_cell_pairs, d_cell_pairs_buf, d_n_cell_pairs, d_idx_head_cell_pairs); hipDeviceSynchronize(); //printf("dbg0413 cuda_enumerate_cell_pairs_08\n"); //cudaMemoryTest(); // debug code //int* h_n_cell_pairs; //HANDLE_ERROR(hipHostMalloc((void **)&h_n_cell_pairs, max_n_cells * sizeof(int), hipHostMallocDefault)); //HANDLE_ERROR(hipMemcpy(h_n_cell_pairs, d_n_cell_pairs, max_n_cells * sizeof(int), hipMemcpyDeviceToHost)); // for(int i_ncp=0; i_ncp < max_n_cells; i_ncp++){ //printf("dbg0414ncp cp %d: %d\n", i_ncp, h_n_cell_pairs[i_ncp]); //} //int* h_idx_head; //HANDLE_ERROR(hipHostMalloc((void **)&h_idx_head, (max_n_cells+1) * sizeof(int), hipHostMallocDefault)); //HANDLE_ERROR(hipMemcpy(h_idx_head, d_idx_head_cell_pairs, (max_n_cells+1) * sizeof(int), hipMemcpyDeviceToHost)); //for(int i_ncp=0; i_ncp < max_n_cells+1; i_ncp++){ //printf("dbg0414headidx %d: %d\n", i_ncp, h_idx_head[i_ncp]); //} //dbg0420 /* for(int i_ncp=1; i_ncp < n_cells+1; i_ncp++){ if(h_idx_head[i_ncp] <= h_idx_head[i_ncp-1]){ for(int j_ncp=1; j_ncp < max_n_cells+1; j_ncp++){ printf("dbg0414headidx %d: %d ", j_ncp, h_idx_head[j_ncp]); if (i_ncp == j_ncp){ printf(" *here*"); } printf("\n"); } for(int j_ncp=0; j_ncp < max_n_cells; j_ncp++){ printf("dbg0414ncp cp %d: %d\n", j_ncp, h_n_cell_pairs[j_ncp]); } break; } } */ //HANDLE_ERROR(hipHostFree(h_n_cell_pairs)); //HANDLE_ERROR(hipHostFree(h_idx_head)); //CellPair* h_cp; //HANDLE_ERROR(hipHostMalloc((void **)&h_cp, n_cell_pairs * sizeof(CellPair), hipHostMallocDefault)); //HANDLE_ERROR(hipMemcpy(h_cp, d_cell_pairs_buf, 
n_cell_pairs * sizeof(CellPair), hipMemcpyDeviceToHost)); //for(int i_cp=0; i_cp < n_cell_pairs+10; i_cp++){ //printf("dbg0414a cp %d: %d %d\n", i_cp, h_cp[i_cp].cell_id1, h_cp[i_cp].cell_id2); //} //HANDLE_ERROR(hipMemcpy(h_cp, d_cell_pairs, n_cell_pairs * sizeof(CellPair), hipMemcpyDeviceToHost)); //for(int i_cp=0; i_cp < n_cell_pairs+10; i_cp++){ //printf("dbg0414b cp %d: %d %d\n", i_cp, h_cp[i_cp].cell_id1, h_cp[i_cp].cell_id2); //} //HANDLE_ERROR(hipHostFree(h_cp)); /// ! debug code const int blocks5 = (n_cell_pairs + REORDER_THREADS - 1) / REORDER_THREADS; hipLaunchKernelGGL(( set_cell_pairs_nb15off), dim3(blocks5), dim3(REORDER_THREADS), 0, 0, d_idx_head_cell_pairs, d_nb15off, d_atomids, n_cell_pairs, d_cell_pairs); hipDeviceSynchronize(); //printf("dbg0413 cuda_enumerate_cell_pairs_09\n"); //cudaMemoryTest(); return 0; } extern "C" int cuda_memcpy_htod_cell_pairs(CellPair *&h_cell_pairs, int *&h_idx_head_cell_pairs, int n_cell_pairs) { // printf("cuda_memcpy_htod_cell_pairs\n"); HANDLE_ERROR(hipMemcpy(d_cell_pairs, h_cell_pairs, n_cell_pairs * sizeof(CellPair), hipMemcpyHostToDevice)); // HANDLE_ERROR( hipMemset(d_idx_head_cell_pairs, -1, sizeof(int)*(max_n_cells+1)); HANDLE_ERROR( hipMemcpy(d_idx_head_cell_pairs, h_idx_head_cell_pairs, (n_cells + 1) * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpyToSymbol(D_N_CELL_PAIRS, &n_cell_pairs, sizeof(int))); return 0; } extern "C" int cuda_alloc_set_hps_params(real_pw* h_hps_cutoff, real_pw* h_hps_lambda, int n_lj_types){ // printf("threads : %d\n", PW_THREADS); printf("cuda_alloc_set_hps_params\n"); const unsigned int size_lj_matrix = sizeof(real_pw) * n_lj_types * n_lj_types; // hipMalloc HANDLE_ERROR(hipMalloc((void **)&d_hps_cutoff, size_lj_matrix)); HANDLE_ERROR(hipMalloc((void **)&d_hps_lambda, size_lj_matrix)); HANDLE_ERROR(hipMemcpy(d_hps_cutoff, h_hps_cutoff, size_lj_matrix, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_hps_lambda, h_hps_lambda, size_lj_matrix, hipMemcpyHostToDevice)); //printf("cudadbg %d\n", d_hps_lambda[0]); return 0; } extern "C" int cuda_free_hps_params() { // printf("cuda_free_lj_param\n"); // hipUnbindTexture(tex_lj_6term); // hipUnbindTexture(tex_lj_12term); HANDLE_ERROR(hipFree(d_hps_cutoff)); HANDLE_ERROR(hipFree(d_hps_lambda)); return 0; } extern "C" int cuda_hps_constant(real_pw hps_eps){ printf("set_cuda_hps_constant\n"); HANDLE_ERROR(hipMemcpyToSymbol(D_HPS_EPS, &hps_eps, sizeof(real_pw))); return 0; } extern "C" int cuda_debye_huckel_constant(real_pw in_dielect, real_pw in_temperature, real_pw in_ionic_strength){ printf("cuda_debye_huckel_constant (cuda)\n"); debye_length_inv = (1.0/(sqrt(PERMITTIVITY*in_dielect*BOLTZMAN*in_temperature/(2*AVOGADRO*ELEM_CHARGE*ELEM_CHARGE*in_ionic_strength))*1e10)); dielect_inv = 1.0 / in_dielect; HANDLE_ERROR(hipMemcpyToSymbol(D_DEBYE_LEN_INV, &debye_length_inv, sizeof(real_pw))); printf("cuda debye_len_inv : %f\n", debye_length_inv); HANDLE_ERROR(hipMemcpyToSymbol(D_DIELECT_INV, &dielect_inv, sizeof(real_pw))); printf("cuda dielect_inv : %f\n", dielect_inv); return 0; } __device__ real_pw cal_pair_hps_dh(real_pw & w1, real_pw & w2, real_pw & w3, real_pw & ene_vdw, real_pw & ene_ele, const real4 &crd_chg1, const real4 &crd_chg2, const int & atomtype1, const int & atomtype2, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term, const real_pw *__restrict__ d_hps_cutoff, const real_pw *__restrict__ d_hps_lambda){ // const real_pw d12[3] = {crd_chg1.x - crd_chg2.x, crd_chg1.y - crd_chg2.y, crd_chg1.z - crd_chg2.z}; //for(int 
d=0; d < 3; d++){ real_pw tmp_d12 = crd_chg1.z - crd_chg2.z; tmp_d12 -= nearbyint(tmp_d12 * PBC_L_INV[2]) * PBC_L[2]; const real_pw d12[3] = {crd_chg1.x - crd_chg2.x, crd_chg1.y - crd_chg2.y, tmp_d12}; //d12[d] -= static_cast<int>(d12[d] * 1.0/PBC_L[d] + 0.5) * PBC_L[d]; //} const real_pw r12_2 = d12[0] * d12[0] + d12[1] * d12[1] + d12[2] * d12[2]; const real_pw r12 = sqrt(r12_2); if (r12 >= D_CUTOFF) { return r12; } const real_pw r12_inv = 1.0 / r12; const real_pw r12_2_inv = r12_inv * r12_inv; const real_pw r12_3_inv = r12_inv * r12_2_inv; const real_pw r12_6_inv = r12_3_inv * r12_3_inv; const real_pw r12_12_inv = r12_6_inv * r12_6_inv; const int pairtype = atomtype1 * D_N_ATOMTYPES + atomtype2; const real_pw hps_cutoff = d_hps_cutoff[pairtype]; const real_pw hps_lambda = d_hps_lambda[pairtype]; const real_pw term6 = d_lj_6term[pairtype] * r12_6_inv; const real_pw term12 = d_lj_12term[pairtype] * r12_12_inv; real_pw work_coef = r12_2_inv * (-12.0 * term12 + 6.0 * term6); ene_vdw = (-term6 + term12); if(r12 > hps_cutoff){ ene_vdw *= hps_lambda; work_coef *= hps_lambda; }else{ ene_vdw += (1-hps_lambda) * D_HPS_EPS; } const real_pw cc = crd_chg1.w * crd_chg2.w * D_CHARGE_COEFF; const real_pw r12_ld_exp = exp(-r12 * D_DEBYE_LEN_INV); ene_ele = cc * r12_inv * D_DIELECT_INV * r12_ld_exp; work_coef -= ene_ele * (r12_inv + D_DEBYE_LEN_INV); w1 = (work_coef)*d12[0]; w2 = (work_coef)*d12[1]; w3 = (work_coef)*d12[2]; return r12; } __global__ void kernel_pairwise_hps_dh(const real4 *d_crd_chg, CellPair * d_cell_pairs, const int * d_idx_head_cell_pairs, const int * d_atomtype, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term, const real_pw *__restrict__ d_hps_cutoff, const real_pw *__restrict__ d_hps_lambda, real_fc *d_energy, real_fc *d_work) { // const bool flg_mod_15mask){ real_fc ene_vdw = 0.0; real_fc ene_ele = 0.0; const int global_threadIdx = blockDim.x * blockIdx.x + threadIdx.x; const int c1 = global_threadIdx >> 5; const int warpIdx = threadIdx.x >> 5; if (c1 >= D_N_CELLS) { return; } const int laneIdx = global_threadIdx & 31; const int n_loops = (d_idx_head_cell_pairs[c1 + 1] - d_idx_head_cell_pairs[c1]) * 2; const int ene_index_offset = global_threadIdx % N_MULTI_WORK; real_fc work_c1[3] = {0.0, 0.0, 0.0}; const int atom_idx1 = (laneIdx & 7); // laneIdx%8 const int a1 = c1 * N_ATOM_CELL + atom_idx1; __shared__ real4 crd_chg1[N_ATOM_CELL * (PW_THREADS >> 5)]; __shared__ int atomtype1[N_ATOM_CELL * (PW_THREADS >> 5)]; const int sharedmem_idx = N_ATOM_CELL * warpIdx + atom_idx1; if (laneIdx < N_ATOM_CELL) { crd_chg1[sharedmem_idx] = d_crd_chg[c1 * N_ATOM_CELL + laneIdx]; atomtype1[sharedmem_idx] = d_atomtype[c1 * N_ATOM_CELL + laneIdx]; } __syncthreads(); CellPair cellpair; int cp; for (int loopIdx = 0; loopIdx < n_loops; loopIdx++) { if (loopIdx % 2 == 0) { if (laneIdx == 0) { cp = d_idx_head_cell_pairs[c1] + (loopIdx >> 1); if (cp >= D_N_CELL_PAIRS) break; cellpair = d_cell_pairs[cp]; } cp = __shfl(cp, 0); cellpair.cell_id1 = __shfl(cellpair.cell_id1, 0); cellpair.cell_id2 = __shfl(cellpair.cell_id2, 0); cellpair.image = __shfl(cellpair.image, 0); cellpair.pair_mask[0] = __shfl(cellpair.pair_mask[0], 0); cellpair.pair_mask[1] = __shfl(cellpair.pair_mask[1], 0); } if (cellpair.cell_id1 != c1) break; const int c2 = cellpair.cell_id2; // atom_idx ... 
index in cell, 0-7 const int atom_idx2 = (laneIdx >> 3) + 4 * (loopIdx % 2); // laneIdx/8 + 4*(warpIdx%2) // remove 1-2, 1-3, 1-4 pairs const int a2 = c2 * N_ATOM_CELL + atom_idx2; real4 crd_chg2; int atomtype2; if (atom_idx1 == 0) { crd_chg2 = d_crd_chg[a2]; atomtype2 = d_atomtype[a2]; if ((cellpair.image & 1) == 1) crd_chg2.x -= PBC_L[0]; else if ((cellpair.image & 2) == 2) crd_chg2.x += PBC_L[0]; if ((cellpair.image & 4) == 4) crd_chg2.y -= PBC_L[1]; else if ((cellpair.image & 8) == 8) crd_chg2.y += PBC_L[1]; /* if ((cellpair.image & 16) == 16) crd_chg2.z -= PBC_L[2]; else if ((cellpair.image & 32) == 32) crd_chg2.z += PBC_L[2]; */ } int atomid2_top = laneIdx - laneIdx % 8; crd_chg2.x = __shfl(crd_chg2.x, laneIdx - atom_idx1); crd_chg2.y = __shfl(crd_chg2.y, laneIdx - atom_idx1); crd_chg2.z = __shfl(crd_chg2.z, laneIdx - atom_idx1); crd_chg2.w = __shfl(crd_chg2.w, laneIdx - atom_idx1); atomtype2 = __shfl(atomtype2, laneIdx - atom_idx1); real_pw w1 = 0.0, w2 = 0.0, w3 = 0.0; real_pw cur_ene_ele = 0.0; real_pw cur_ene_vdw = 0.0; int mask_id; int interact_bit; if (!check_15off64(atom_idx1, atom_idx2, cellpair.pair_mask, mask_id, interact_bit)) { real_pw r12 = cal_pair_hps_dh(w1, w2, w3, cur_ene_vdw, cur_ene_ele, crd_chg1[sharedmem_idx], crd_chg2, atomtype1[sharedmem_idx], atomtype2, d_lj_6term, d_lj_12term, d_hps_cutoff, d_hps_lambda); ene_vdw += cur_ene_vdw; ene_ele += cur_ene_ele; work_c1[0] += w1; work_c1[1] += w2; work_c1[2] += w3; } for (int i = 4; i >= 1; i /= 2) { w1 += shfl_xor(w1, i, 8); w2 += shfl_xor(w2, i, 8); w3 += shfl_xor(w3, i, 8); } if (laneIdx % 8 == 0) { // && (w1 != 0.0 || w2 != 0.0 || w3 != 0.0)){ const int tmp_index = (((global_threadIdx / WARPSIZE) % N_MULTI_WORK) * D_N_ATOM_ARRAY + a2) * 3; atomicAdd2(&(d_work[tmp_index + 0]), -w1); atomicAdd2(&(d_work[tmp_index + 1]), -w2); atomicAdd2(&(d_work[tmp_index + 2]), -w3); } } for (int i = 16; i >= 8; i /= 2) { work_c1[0] += shfl_xor(work_c1[0], i, 32); work_c1[1] += shfl_xor(work_c1[1], i, 32); work_c1[2] += shfl_xor(work_c1[2], i, 32); } if (laneIdx < 8) { const int tmp_index = ((ene_index_offset * D_N_ATOM_ARRAY) + a1) * 3; atomicAdd2(&(d_work[tmp_index + 0]), work_c1[0]); atomicAdd2(&(d_work[tmp_index + 1]), work_c1[1]); atomicAdd2(&(d_work[tmp_index + 2]), work_c1[2]); } for (int i = 16; i >= 1; i /= 2) { ene_vdw += shfl_xor(ene_vdw, i, 32); ene_ele += shfl_xor(ene_ele, i, 32); } if (laneIdx == 0) { const int tmp_index = ((global_threadIdx / 32) % N_MULTI_WORK) * 2; atomicAdd2(&(d_energy[tmp_index + 0]), ene_vdw); atomicAdd2(&(d_energy[tmp_index + 1]), ene_ele); } } extern "C" int cuda_pairwise_hps_dh(const bool flg_mod_15mask) { HANDLE_ERROR(hipMemset(d_energy, 0.0, sizeof(real_fc) * 2 * N_MULTI_WORK)); HANDLE_ERROR(hipMemset(d_work, 0.0, sizeof(real_fc) * max_n_atom_array * 3 * N_MULTI_WORK)); hipStreamCreate(&stream_pair_home); const int blocks = (n_cells + PW_THREADS / 32 - 1) / (PW_THREADS / 32); hipLaunchKernelGGL(( kernel_pairwise_hps_dh), dim3(blocks), dim3(PW_THREADS), 0, stream_pair_home, d_crd_chg, d_cell_pairs, d_idx_head_cell_pairs, d_atomtype, d_lj_6term, d_lj_12term, d_hps_cutoff, d_hps_lambda, d_energy, d_work); hipDeviceSynchronize(); return 0; }
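// ---------------------------------------------------------------------------
// Editor's sketch (hedged; not part of the original HIP/CUDA pair above or
// below): the pairwise kernels in this file (kernel_pairwise_ljzd,
// kernel_pairwise_hps_dh) accumulate forces and energies with two building
// blocks defined near the top of the file -- a CAS-based double-precision
// atomicAdd (atomicAdd2) and butterfly reductions across warp lanes via
// shfl_xor. The self-contained CUDA program below isolates that pattern on a
// plain array sum. The kernel name demo_warp_sum, the problem size, and the
// use of the CUDA 9+ __shfl_xor_sync intrinsic (instead of the hi/lo-split
// shfl_xor wrapper used in the file) are illustrative assumptions only.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__device__ double demo_atomic_add_double(double *address, double val) {
    // Same retry loop as atomicAdd2 above: re-read and retry the CAS until
    // no other thread changed the 64-bit word between load and swap.
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

__global__ void demo_warp_sum(const double *in, double *out, int n) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    double v = (idx < n) ? in[idx] : 0.0;
    // Butterfly reduction over the 32 lanes of a warp, mirroring the
    // "for (int i = 16; i >= 1; i /= 2) ene += shfl_xor(ene, i, 32)" loops
    // in the pairwise kernels (the _sync variant assumes CUDA 9 or later).
    for (int i = 16; i >= 1; i /= 2)
        v += __shfl_xor_sync(0xffffffffu, v, i, 32);
    // Only lane 0 of each warp touches global memory, so one atomic is
    // issued per warp instead of per thread -- the same reason the pairwise
    // kernels gate their atomicAdd2 calls on laneIdx == 0.
    if ((threadIdx.x & 31) == 0) demo_atomic_add_double(out, v);
}

int main() {
    const int n = 1 << 20;                 // illustrative problem size
    std::vector<double> h_in(n, 0.5);      // expected sum: n * 0.5
    double *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(double));
    cudaMalloc(&d_out, sizeof(double));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(double));  // all-zero bits == 0.0
    demo_warp_sum<<<(n + 255) / 256, 256>>>(d_in, d_out, n);
    double h_out = 0.0;
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    printf("warp-reduced sum = %.1f (expected %.1f)\n", h_out, 0.5 * n);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
// ---------------------------------------------------------------------------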
783adabb8d3e3d3db9327808ce5c47cd550352bf.cu
#include "cuda_setup.h" __device__ __inline__ double shfl_xor(double value, int const lane, int const warpsize) { return __hiloint2double(__shfl_xor(__double2hiint(value), lane, warpsize), __shfl_xor(__double2loint(value), lane, warpsize)); } __device__ double atomicAdd2(double *address, double val) { unsigned long long int *address_as_ull = (unsigned long long int *)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } extern "C" void cudaMemoryTest() { printf("cudamemorytest 0\n"); const unsigned int N = 1048576; const unsigned int bytes = N * sizeof(int); //int *h_a = (int*)malloc(bytes); int *h_a; int *d_a; HANDLE_ERROR(cudaHostAlloc((void **)&h_a, bytes, cudaHostAllocDefault)); //HANDLE_ERROR(cudaHostAlloc((void**)&h_a, bytes)); HANDLE_ERROR(cudaMalloc((void**)&d_a, bytes)); memset(h_a, 0, bytes); HANDLE_ERROR(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaFreeHost(h_a)); HANDLE_ERROR(cudaFree(d_a)); printf("cudamemorytest 1\n"); return; } extern "C" int cuda_alloc_atom_info(int in_max_n_atoms_exbox, // int in_max_n_atom_array, int in_max_n_cells, int in_max_n_cell_pairs, int in_n_columns) { printf("cuda_alloc_atom_info\n"); printf("d_crd : %d \n", max_n_atom_array * 3 * sizeof(real_pw)); max_n_atoms_exbox = in_max_n_atoms_exbox; // max_n_atom_array = in_max_n_atom_array; max_n_cell_pairs = in_max_n_cell_pairs; max_n_cells = in_max_n_cells; HANDLE_ERROR(cudaMalloc((void **)&d_crd_chg, max_n_atom_array * sizeof(real4))); HANDLE_ERROR(cudaMalloc((void **)&d_cell_z, max_n_cells * sizeof(real2))); HANDLE_ERROR(cudaMalloc((void **)&d_crd, max_n_atom_array * 3 * sizeof(real_pw))); HANDLE_ERROR(cudaMalloc((void **)&d_charge_orig, max_n_atoms_exbox * sizeof(real_pw))); HANDLE_ERROR(cudaMalloc((void **)&d_atomtype, max_n_atom_array * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&d_atomids, max_n_atom_array * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&d_atomids_rev, max_n_atoms_exbox * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&d_atomtype_orig, max_n_atoms_exbox * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&d_cell_pairs, max_n_cell_pairs * sizeof(CellPair))); HANDLE_ERROR(cudaMalloc((void **)&d_cell_pairs_buf, max_n_cell_pairs * sizeof(CellPair))); HANDLE_ERROR(cudaMalloc((void **)&d_idx_head_cell_pairs, (in_max_n_cells + 1) * sizeof(int))); HANDLE_ERROR(cudaMalloc((void **)&d_idx_cell_column, (in_max_n_cells) * sizeof(int))); HANDLE_ERROR(cudaHostAlloc((void **)&h_idx_cell_column, in_max_n_cells * sizeof(int), cudaHostAllocDefault)); // HANDLE_ERROR( cudaMalloc((void**)&d_cell_pair_removed, //(in_max_n_cells+1) * sizeof(int)) ); HANDLE_ERROR(cudaMalloc((void **)&d_n_cell_pairs, (max_n_cells) * sizeof(int))); // HANDLE_ERROR( cudaMalloc((void**)&d_grid_atom_index, //(max_n_ + 1) * sizeof(int)) ); HANDLE_ERROR(cudaMalloc((void **)&d_energy, N_MULTI_WORK * 2 * sizeof(real_fc))); HANDLE_ERROR(cudaMalloc((void **)&d_work, N_MULTI_WORK * max_n_atom_array * 3 * sizeof(real_fc))); HANDLE_ERROR(cudaMalloc((void **)&d_idx_xy_head_cell, in_n_columns * sizeof(int))); HANDLE_ERROR(cudaMemcpyToSymbol(D_MAX_N_CELL_PAIRS, &in_max_n_cell_pairs, sizeof(int))); return 0; } extern "C" int cuda_free_atom_info() { // printf("cuda_free_device_atom_info\n"); HANDLE_ERROR(cudaFree(d_crd_chg)); 
HANDLE_ERROR(cudaFree(d_cell_z)); HANDLE_ERROR(cudaFree(d_crd)); HANDLE_ERROR(cudaFree(d_atomids)); HANDLE_ERROR(cudaFree(d_atomids_rev)); HANDLE_ERROR(cudaFree(d_charge_orig)); HANDLE_ERROR(cudaFree(d_atomtype)); HANDLE_ERROR(cudaFree(d_atomtype_orig)); HANDLE_ERROR(cudaFree(d_cell_pairs)); HANDLE_ERROR(cudaFree(d_cell_pairs_buf)); HANDLE_ERROR(cudaFree(d_idx_head_cell_pairs)); HANDLE_ERROR(cudaFree(d_idx_cell_column)); HANDLE_ERROR(cudaFreeHost(h_idx_cell_column)); // HANDLE_ERROR( cudaFree(d_cell_pair_removed) ); HANDLE_ERROR(cudaFree(d_n_cell_pairs)); HANDLE_ERROR(cudaFree(d_energy)); HANDLE_ERROR(cudaFree(d_work)); HANDLE_ERROR(cudaFree(d_idx_xy_head_cell)); // HANDLE_ERROR( cudaFree(d_uni2cell_z)); // HANDLE_ERROR( cudaFree(d_work_orig) ); return 0; } extern "C" int cuda_memcpy_htod_atomids(int *&h_atomids, int *&h_idx_xy_head_cell) { HANDLE_ERROR(cudaMemset(d_atomids, -1, sizeof(int) * max_n_atom_array)); HANDLE_ERROR(cudaMemcpy(d_atomids, h_atomids, n_atom_array * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy(d_idx_xy_head_cell, h_idx_xy_head_cell, (n_columns + 1) * sizeof(int), cudaMemcpyHostToDevice)); return 0; } // cuda_memcpy_htod_atom_info // Arrays of charges and atomtypes of all atoms in the process are sent to // the device. extern "C" int cuda_memcpy_htod_atom_info(real_pw *&h_charge_orig, int *&h_atomtype_orig) { HANDLE_ERROR(cudaMemcpy(d_charge_orig, h_charge_orig, n_atoms_system * sizeof(real_pw), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_atomtype_orig, h_atomtype_orig, n_atoms_system * sizeof(int), cudaMemcpyHostToDevice)); return 0; } // cuda_memcpy_htod_crd // Sending nsgrid.crd to device //extern "C" int cuda_memcpy_htod_crd(real_pw *&h_crd) { extern "C" int cuda_memcpy_htod_crd(real_pw *&h_crd) { //HANDLE_ERROR(cudaMemcpy(d_crd, h_crd, n_atom_array * 3 * sizeof(real_pw), cudaMemcpyHostToDevice)); //printf("cuda_memcpy_htod_crd h_crd : %d\n",n_atom_array * 3 * sizeof(real_pw)); //cudaMemoryTest(); //HANDLE_ERROR(cudaMemcpy(d_crd, h_crd, sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_crd, h_crd, n_atom_array * 3 * sizeof(real_pw), cudaMemcpyHostToDevice)); //HANDLE_ERROR(cudaMemcpy(d_crd, h_crd, n_atom_array * 3 * sizeof(real_pw), cudaMemcpyHostToDevice)); // cudaMemoryTest(); return 0; } extern "C" int cuda_set_pbc(real_pw *l, real_pw *lb) { printf("cuda_set_pbc\n"); HANDLE_ERROR(cudaMemcpyToSymbol(PBC_L, l, sizeof(real_pw) * 3)); real_pw l_inv[3]; for(int d=0; d<3; d++) l_inv[d] = 1.0/l[d]; HANDLE_ERROR(cudaMemcpyToSymbol(PBC_L_INV, l_inv, sizeof(real_pw) * 3)); HANDLE_ERROR(cudaMemcpyToSymbol(PBC_LOWER_BOUND, lb, sizeof(real_pw) * 3)); return 0; } extern "C" int cuda_zerodipole_constant(real_pw zcore, real_pw bcoeff, real_pw fcoeff) { HANDLE_ERROR(cudaMemcpyToSymbol(D_ZCORE, &zcore, sizeof(real_pw))); HANDLE_ERROR(cudaMemcpyToSymbol(D_BCOEFF, &bcoeff, sizeof(real_pw))); HANDLE_ERROR(cudaMemcpyToSymbol(D_FCOEFF, &fcoeff, sizeof(real_pw))); return 0; } // cuda_set_cell_constant // These constants are updated when the cell grid is updated extern "C" int cuda_set_cell_constant(const int in_n_cells, const int in_n_atoms_exbox, const int in_n_atom_array, const int * in_n_cells_xyz, const int in_n_columns, const real_pw *in_l_cell_xyz, const int * in_n_neighbor_xyz) { n_atoms_exbox = in_n_atoms_exbox; n_cells = in_n_cells; n_atom_array = in_n_atom_array; n_columns = in_n_columns; HANDLE_ERROR(cudaMemcpyToSymbol(D_N_CELLS, &n_cells, sizeof(int))); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_ATOM_ARRAY, &n_atom_array, sizeof(int))); 
HANDLE_ERROR(cudaMemcpyToSymbol(D_N_CELLS_XYZ, in_n_cells_xyz, sizeof(int) * 3)); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_COLUMNS, &n_columns, sizeof(int))); HANDLE_ERROR(cudaMemcpyToSymbol(D_L_CELL_XYZ, in_l_cell_xyz, sizeof(real_pw) * 3)); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_NEIGHBOR_XYZ, in_n_neighbor_xyz, sizeof(int) * 3)); const int n_neighbor_col = (in_n_neighbor_xyz[0] * 2 + 1) * (in_n_neighbor_xyz[1] * 2 + 1); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_NEIGHBOR_COL, &n_neighbor_col, sizeof(int))); const int max_n_cell_pairs_per_cell = max_n_cell_pairs / n_cells; HANDLE_ERROR(cudaMemcpyToSymbol(D_MAX_N_CELL_PAIRS_PER_CELL, &max_n_cell_pairs_per_cell, sizeof(int))); //printf("dbg0420 const %d %d %d %d %d %d ", //n_cells, n_atom_array, //in_n_cells_xyz[0], in_n_cells_xyz[1], in_n_cells_xyz[2], //n_columns); //printf(" %f %f %f %d %d %d %d %d\n", //in_l_cell_xyz[0], in_l_cell_xyz[1], in_l_cell_xyz[2], //in_n_neighbor_xyz[0],in_n_neighbor_xyz[1],in_n_neighbor_xyz[2], //n_neighbor_col, max_n_cell_pairs_per_cell); return 0; } // cuda_set_constant // called only onece at the beginning of simulation extern "C" int cuda_set_constant(real_pw cutoff, real_pw cutoff_pairlist, int n_atomtypes) { real_pw tmp_charge_coeff = (real_pw)332.06378; // CelesteObject::CHARGE_COEFF; HANDLE_ERROR(cudaMemcpyToSymbol(D_CHARGE_COEFF, &tmp_charge_coeff, sizeof(real_pw))); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_ATOMTYPES, &n_atomtypes, sizeof(int))); HANDLE_ERROR(cudaMemcpyToSymbol(D_CUTOFF, &cutoff, sizeof(real_pw))); HANDLE_ERROR(cudaMemcpyToSymbol(D_CUTOFF_PAIRLIST, &cutoff_pairlist, sizeof(real_pw))); const real_pw cutoff_pairlist_2 = cutoff_pairlist * cutoff_pairlist; HANDLE_ERROR(cudaMemcpyToSymbol(D_CUTOFF_PAIRLIST_2, &cutoff_pairlist_2, sizeof(real_pw))); return 0; } extern "C" int cuda_alloc_set_lj_params(real_pw * h_lj_6term, real_pw * h_lj_12term, int n_lj_types, int * h_nb15off, const int in_max_n_nb15off) { // printf("threads : %d\n", PW_THREADS); printf("cuda_alloc_set_lj_params\n"); const unsigned int size_lj_matrix = sizeof(real_pw) * n_lj_types * n_lj_types; // cudaMalloc HANDLE_ERROR(cudaMalloc((void **)&d_lj_6term, size_lj_matrix)); HANDLE_ERROR(cudaMalloc((void **)&d_lj_12term, size_lj_matrix)); max_n_nb15off = in_max_n_nb15off; HANDLE_ERROR(cudaMemcpyToSymbol(D_MAX_N_NB15OFF, &max_n_nb15off, sizeof(int))); const unsigned int size_nb15off_orig = sizeof(int) * max_n_nb15off * max_n_atoms_exbox; HANDLE_ERROR(cudaMalloc((void **)&d_nb15off_orig, size_nb15off_orig)); size_nb15off = max_n_nb15off * max_n_atom_array; printf("dbg0414 size_nb15off : %d\n",size_nb15off); HANDLE_ERROR(cudaMalloc((void **)&d_nb15off, sizeof(int) * size_nb15off)); // cudaBindTexture2D /* cudaChannelFormatDesc desc = cudaCreateChannelDesc<real>(); HANDLE_ERROR( cudaBindTexture2D( NULL, tex_lj_6term, d_lj_6term, desc, n_lj_types, n_lj_types, sizeof(real)*n_lj_types) ); HANDLE_ERROR( cudaBindTexture2D( NULL, tex_lj_12term, d_lj_12term, desc, n_lj_types, n_lj_types, sizeof(real)*n_lj_types) ); */ // cudaMempcpy HANDLE_ERROR(cudaMemcpy(d_lj_6term, h_lj_6term, size_lj_matrix, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_lj_12term, h_lj_12term, size_lj_matrix, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_nb15off_orig, h_nb15off, size_nb15off_orig, cudaMemcpyHostToDevice)); return 0; } extern "C" int cuda_free_lj_params() { // printf("cuda_free_lj_param\n"); // cudaUnbindTexture(tex_lj_6term); // cudaUnbindTexture(tex_lj_12term); HANDLE_ERROR(cudaFree(d_lj_6term)); HANDLE_ERROR(cudaFree(d_lj_12term)); 
HANDLE_ERROR(cudaFree(d_nb15off_orig)); HANDLE_ERROR(cudaFree(d_nb15off)); return 0; } // cuda_hostalloc_atom_type_charge extern "C" int cuda_hostalloc_atom_type_charge(int *&h_atom_type, real_pw *&h_charge, const int in_n_atoms_system) { n_atoms_system = in_n_atoms_system; HANDLE_ERROR(cudaMemcpyToSymbol(D_N_ATOMS_SYSTEM, &in_n_atoms_system, sizeof(int))); printf("hostalloc atom_type_charge cu %d\n", in_n_atoms_system); HANDLE_ERROR(cudaHostAlloc((void **)&h_atom_type, n_atoms_system * sizeof(int), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&h_charge, n_atoms_system * sizeof(real_pw), cudaHostAllocDefault)); return 0; } // cuda_hostalloc_atom_info // Allocation for MiniCell members extern "C" int cuda_hostalloc_atom_info(real_pw *& h_crd, int *& h_atomids, real_fc *& h_work, real_fc *& h_energy, int in_max_n_atom_array) { max_n_atom_array = in_max_n_atom_array; printf("hostalloc_atom_info %d\n", max_n_atom_array); HANDLE_ERROR(cudaHostAlloc((void **)&h_crd, max_n_atom_array * 3 * sizeof(real_pw), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&h_atomids, max_n_atom_array * sizeof(int), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&h_work, max_n_atom_array * 3 * sizeof(real_fc), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&h_energy, 2 * sizeof(real_fc), cudaHostAllocDefault)); return 0; } extern "C" int cuda_hostalloc_cell_info(int *&h_idx_xy_head_cell, int n_columns) { printf("cuda_hostalloc_cell_info cu\n"); HANDLE_ERROR(cudaHostAlloc((void **)&h_idx_xy_head_cell, (n_columns) * sizeof(int), cudaHostAllocDefault)); return 0; } extern "C" int cuda_hostalloc_cellpair_info(CellPair *&h_cell_pairs, int *& h_idx_head_cell_pairs, int *& h_n_cells_z, int max_n_cell_pairs, int max_n_cells, int n_columns) { printf("cuda_hostalloc_cellpair_info cu\n"); HANDLE_ERROR(cudaHostAlloc((void **)&h_cell_pairs, max_n_cell_pairs * sizeof(CellPair), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&h_idx_head_cell_pairs, (max_n_cells) * sizeof(int), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void **)&h_n_cells_z, (n_columns) * sizeof(int), cudaHostAllocDefault)); return 0; } extern "C" int cuda_hostfree_cellpair_info(CellPair *h_cell_pairs, int *h_idx_head_cell_pairs, int *&h_n_cells_z) { HANDLE_ERROR(cudaFreeHost(h_cell_pairs)); HANDLE_ERROR(cudaFreeHost(h_idx_head_cell_pairs)); HANDLE_ERROR(cudaFreeHost(h_n_cells_z)); } extern "C" int cuda_hostfree_atom_type_charge(int *h_atom_type, real_pw *h_charge) { HANDLE_ERROR(cudaFreeHost(h_atom_type)); HANDLE_ERROR(cudaFreeHost(h_charge)); return 0; } extern "C" int cuda_hostfree_atom_info(real_pw *h_crd, int *h_atomids, real_fc *&h_work, real_fc *&h_energy) { HANDLE_ERROR(cudaFreeHost(h_crd)); HANDLE_ERROR(cudaFreeHost(h_atomids)); HANDLE_ERROR(cudaFreeHost(h_work)); HANDLE_ERROR(cudaFreeHost(h_energy)); return 0; } extern "C" int cuda_hostfree_cell_info(int *h_idx_xy_head_cell) { HANDLE_ERROR(cudaFreeHost(h_idx_xy_head_cell)); return 0; } __global__ void kernel_set_nb15off(const int *d_atomids, const int *d_atomids_rev, const int *d_nb15off_orig, int * d_nb15off) { /*if(threadIdx.x != 0) return; for(int g_thread_idx=0; g_thread_idx/D_MAX_N_NB15OFF < D_N_ATOM_ARRAY; g_thread_idx++){ const int atomid = g_thread_idx / D_MAX_N_NB15OFF; const int idx = g_thread_idx % D_MAX_N_NB15OFF; if (d_atomids[atomid] < 0) { d_nb15off[g_thread_idx] = atomid; } else { const int orig = d_nb15off_orig[d_atomids[atomid] * D_MAX_N_NB15OFF + idx]; if (orig == -1) { d_nb15off[g_thread_idx] = -1; } else { 
d_nb15off[g_thread_idx] = d_atomids_rev[orig]; } } }*/ const int g_thread_idx = threadIdx.x + blockDim.x * blockIdx.x; const int atomid = g_thread_idx / D_MAX_N_NB15OFF; const int idx = g_thread_idx % D_MAX_N_NB15OFF; if (atomid >= D_N_ATOM_ARRAY) return; if (d_atomids[atomid] < 0) { d_nb15off[g_thread_idx] = atomid; } else { const int orig = d_nb15off_orig[d_atomids[atomid] * D_MAX_N_NB15OFF + idx]; if (orig == -1) { d_nb15off[g_thread_idx] = -1; } else { d_nb15off[g_thread_idx] = d_atomids_rev[orig]; } } } __global__ void kernel_set_atominfo(const int * d_atomids, const int * d_atomtype_orig, const real_pw *d_charge_orig, int * d_atomtype, real4 * d_crd_chg, int * d_atomids_rev) { //dbg0420 int atomid = threadIdx.x + blockIdx.x * blockDim.x; if (atomid < D_N_ATOM_ARRAY && d_atomids[atomid] >= 0) { //if(threadIdx.x != 0) return; //for(int atomid = 0 ; atomid < D_N_ATOM_ARRAY; atomid ++){ d_atomtype[atomid] = d_atomtype_orig[d_atomids[atomid]]; d_crd_chg[atomid].w = d_charge_orig[d_atomids[atomid]]; d_atomids_rev[d_atomids[atomid]] = atomid; } } __global__ void kernel_set_crd(const int *d_atomids, const real_pw *d_crd, real4 *d_crd_chg, real2 *d_cell_z) { //dbg0420 /*if(threadIdx.x != 0) return; for(int atomid=0; atomid < D_N_ATOM_ARRAY; atomid++){ int at_idx = atomid * 3; d_crd_chg[atomid].x = d_crd[at_idx]; d_crd_chg[atomid].y = d_crd[at_idx + 1]; d_crd_chg[atomid].z = d_crd[at_idx + 2]; }*/ int atomid = threadIdx.x + blockIdx.x * blockDim.x; if (atomid < D_N_ATOM_ARRAY) { int at_idx = atomid * 3; d_crd_chg[atomid].x = d_crd[at_idx]; d_crd_chg[atomid].y = d_crd[at_idx + 1]; d_crd_chg[atomid].z = d_crd[at_idx + 2]; /*if(atomid % N_ATOM_CELL == 0){ d_cell_z[atomid/N_ATOM_CELL].x = d_crd[at_idx+2]; }else if(atomid % N_ATOM_CELL == N_ATOM_CELL-1){ d_cell_z[atomid/N_ATOM_CELL].y = d_crd[at_idx+2]; }*/ } } extern "C" int cuda_set_atominfo() { HANDLE_ERROR(cudaMemset(d_atomtype, -1, sizeof(int) * max_n_atom_array)); HANDLE_ERROR(cudaMemset(d_atomids_rev, -1, sizeof(int) * max_n_atoms_exbox)); HANDLE_ERROR(cudaMemset(d_nb15off, -1, sizeof(int) * size_nb15off)); HANDLE_ERROR(cudaMemset(d_n_cell_pairs, 0, sizeof(int) * max_n_cells)); // HANDLE_ERROR( cudaMemset(d_cell_pair_removed, 0, sizeof(int)*max_n_cells )); int blocks1 = (n_atom_array + REORDER_THREADS - 1) / REORDER_THREADS; kernel_set_atominfo<<<blocks1, REORDER_THREADS>>>(d_atomids, d_atomtype_orig, d_charge_orig, d_atomtype, d_crd_chg, d_atomids_rev); cudaThreadSynchronize(); //kernel_set_atominfo<<<1, REORDER_THREADS>>>(d_atomids, d_atomtype_orig, //d_charge_orig, d_atomtype, //d_crd_chg, d_atomids_rev); int blocks2 = (n_atom_array * max_n_nb15off + REORDER_THREADS - 1) / REORDER_THREADS; //dbg0420 kernel_set_nb15off<<<blocks2, REORDER_THREADS>>>(d_atomids, d_atomids_rev, d_nb15off_orig, d_nb15off); //kernel_set_nb15off<<<1, REORDER_THREADS>>>(d_atomids, d_atomids_rev, d_nb15off_orig, d_nb15off); cudaThreadSynchronize(); return 0; } extern "C" int cuda_set_crd() { int blocks = (n_atom_array + REORDER_THREADS - 1) / REORDER_THREADS; //dbg0420 kernel_set_crd<<<blocks, REORDER_THREADS>>>(d_atomids, d_crd, d_crd_chg, d_cell_z); cudaThreadSynchronize(); //kernel_set_crd<<<1, REORDER_THREADS>>>(d_atomids, d_crd, d_crd_chg, d_cell_z); return 0; } __global__ void kernel_set_work_orig(real_fc *d_work, const int *d_atomids) { int atomid = threadIdx.x + blockIdx.x * blockDim.x; if (atomid < D_N_ATOM_ARRAY) { // && d_atomids[atomid] >= 0){ int index_orig = atomid * 3; for (int n = 1; n < N_MULTI_WORK; n++) { int index = (atomid + D_N_ATOM_ARRAY * n) 
* 3; d_work[index_orig + 0] += d_work[index + 0]; d_work[index_orig + 1] += d_work[index + 1]; d_work[index_orig + 2] += d_work[index + 2]; } } } __global__ void kernel_reduction_energy(real_fc *d_energy) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = 1; i < N_MULTI_WORK; i++) { if (tid == 1) { d_energy[0] += d_energy[i * 2]; d_energy[1] += d_energy[i * 2 + 1]; // printf("ene %f %f\n",d_energy[0],d_energy[1]); } } } __device__ bool check_15off64(const int atom_idx1, const int atom_idx2, const int *bitmask, int & mask_id, int & interact_bit) { int bit_pos = atom_idx2 * N_ATOM_CELL + atom_idx1; mask_id = bit_pos / 32; interact_bit = 1 << (bit_pos % 32); return (bitmask[mask_id] & interact_bit) == interact_bit; } __device__ real_pw check_15off(const int atomid1, const int atomid2, const int tmask_a1, const int tmask_a2) { int aid_diff = atomid2 - atomid1; int target = tmask_a1; if (aid_diff < 0) { aid_diff = -aid_diff; target = tmask_a2; } int mask = 0; if (aid_diff <= 32) mask = 1 << (aid_diff - 1); real_pw valid_pair = 1.0; if (mask != 0 && (mask & target) == mask) valid_pair = 0.0; return valid_pair; } __device__ real_pw cal_pair(real_pw & w1, real_pw & w2, real_pw & w3, real_pw & ene_vdw, real_pw & ene_ele, const real4 &crd_chg1, const real4 &crd_chg2, const int & atomtype1, const int & atomtype2, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term) { const real_pw d12[3] = {crd_chg1.x - crd_chg2.x, crd_chg1.y - crd_chg2.y, crd_chg1.z - crd_chg2.z}; const real_pw r12_2 = d12[0] * d12[0] + d12[1] * d12[1] + d12[2] * d12[2]; const real_pw r12 = sqrt(r12_2); if (r12 >= D_CUTOFF) { return r12; } const real_pw r12_inv = 1.0 / r12; const real_pw r12_2_inv = r12_inv * r12_inv; const real_pw r12_3_inv = r12_inv * r12_2_inv; const real_pw r12_6_inv = r12_3_inv * r12_3_inv; const real_pw r12_12_inv = r12_6_inv * r12_6_inv; const real_pw term6 = d_lj_6term[atomtype1 * D_N_ATOMTYPES + atomtype2] * r12_6_inv; const real_pw term12 = d_lj_12term[atomtype1 * D_N_ATOMTYPES + atomtype2] * r12_12_inv; real_pw work_coef = r12_2_inv * (-12.0 * term12 + 6.0 * term6); const real_pw cc = crd_chg1.w * crd_chg2.w * D_CHARGE_COEFF; work_coef -= cc * (r12_3_inv - D_FCOEFF); w1 = (work_coef)*d12[0]; w2 = (work_coef)*d12[1]; w3 = (work_coef)*d12[2]; ene_ele = cc * (r12_inv - D_ZCORE + D_BCOEFF * r12_2); ene_vdw = (-term6 + term12); return r12; } __global__ void kernel_pairwise_ljzd(const real4 *d_crd_chg, CellPair * d_cell_pairs, const int * d_idx_head_cell_pairs, const int * d_atomtype, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term, real_fc *d_energy, real_fc *d_work) { // const bool flg_mod_15mask){ real_fc ene_vdw = 0.0; real_fc ene_ele = 0.0; const int global_threadIdx = blockDim.x * blockIdx.x + threadIdx.x; const int c1 = global_threadIdx >> 5; const int warpIdx = threadIdx.x >> 5; if (c1 >= D_N_CELLS) { return; } const int laneIdx = global_threadIdx & 31; const int n_loops = (d_idx_head_cell_pairs[c1 + 1] - d_idx_head_cell_pairs[c1]) * 2; const int ene_index_offset = global_threadIdx % N_MULTI_WORK; real_fc work_c1[3] = {0.0, 0.0, 0.0}; const int atom_idx1 = (laneIdx & 7); // laneIdx%8 const int a1 = c1 * N_ATOM_CELL + atom_idx1; __shared__ real4 crd_chg1[N_ATOM_CELL * (PW_THREADS >> 5)]; __shared__ int atomtype1[N_ATOM_CELL * (PW_THREADS >> 5)]; const int sharedmem_idx = N_ATOM_CELL * warpIdx + atom_idx1; if (laneIdx < N_ATOM_CELL) { crd_chg1[sharedmem_idx] = d_crd_chg[c1 * N_ATOM_CELL + laneIdx]; atomtype1[sharedmem_idx] = 
d_atomtype[c1 * N_ATOM_CELL + laneIdx]; } __syncthreads(); CellPair cellpair; int cp; for (int loopIdx = 0; loopIdx < n_loops; loopIdx++) { if (loopIdx % 2 == 0) { if (laneIdx == 0) { cp = d_idx_head_cell_pairs[c1] + (loopIdx >> 1); if (cp >= D_N_CELL_PAIRS) break; cellpair = d_cell_pairs[cp]; } cp = __shfl(cp, 0); cellpair.cell_id1 = __shfl(cellpair.cell_id1, 0); cellpair.cell_id2 = __shfl(cellpair.cell_id2, 0); cellpair.image = __shfl(cellpair.image, 0); cellpair.pair_mask[0] = __shfl(cellpair.pair_mask[0], 0); cellpair.pair_mask[1] = __shfl(cellpair.pair_mask[1], 0); } if (cellpair.cell_id1 != c1) break; const int c2 = cellpair.cell_id2; // atom_idx ... index in cell, 0-7 const int atom_idx2 = (laneIdx >> 3) + 4 * (loopIdx % 2); // laneIdx/8 + 4*(warpIdx%2) // remove 1-2, 1-3, 1-4 pairs const int a2 = c2 * N_ATOM_CELL + atom_idx2; real4 crd_chg2; int atomtype2; if (atom_idx1 == 0) { crd_chg2 = d_crd_chg[a2]; atomtype2 = d_atomtype[a2]; if ((cellpair.image & 1) == 1) crd_chg2.x -= PBC_L[0]; else if ((cellpair.image & 2) == 2) crd_chg2.x += PBC_L[0]; if ((cellpair.image & 4) == 4) crd_chg2.y -= PBC_L[1]; else if ((cellpair.image & 8) == 8) crd_chg2.y += PBC_L[1]; if ((cellpair.image & 16) == 16) crd_chg2.z -= PBC_L[2]; else if ((cellpair.image & 32) == 32) crd_chg2.z += PBC_L[2]; } int atomid2_top = laneIdx - laneIdx % 8; crd_chg2.x = __shfl(crd_chg2.x, laneIdx - atom_idx1); crd_chg2.y = __shfl(crd_chg2.y, laneIdx - atom_idx1); crd_chg2.z = __shfl(crd_chg2.z, laneIdx - atom_idx1); crd_chg2.w = __shfl(crd_chg2.w, laneIdx - atom_idx1); atomtype2 = __shfl(atomtype2, laneIdx - atom_idx1); real_pw w1 = 0.0, w2 = 0.0, w3 = 0.0; real_pw cur_ene_ele = 0.0; real_pw cur_ene_vdw = 0.0; int mask_id; int interact_bit; if (!check_15off64(atom_idx1, atom_idx2, cellpair.pair_mask, mask_id, interact_bit)) { real_pw r12 = cal_pair(w1, w2, w3, cur_ene_vdw, cur_ene_ele, // d_crd_chg[a1], crd_chg1[sharedmem_idx], crd_chg2, // d_atomtype[a1], atomtype1[sharedmem_idx], atomtype2, d_lj_6term, d_lj_12term); // if(flg_mod_15mask && r12 < D_CUTOFF_PAIRLIST) interact_bit = 0; ene_vdw += cur_ene_vdw; ene_ele += cur_ene_ele; work_c1[0] += w1; work_c1[1] += w2; work_c1[2] += w3; } /*if(flg_mod_15mask){ for(int i = 32; i >= 1; i/=2){ interact_bit |= __shfl_xor(interact_bit, i); } if(laneIdx == 0) d_cell_pairs[cp].pair_mask[mask_id] |= interact_bit; }*/ for (int i = 4; i >= 1; i /= 2) { w1 += shfl_xor(w1, i, 8); w2 += shfl_xor(w2, i, 8); w3 += shfl_xor(w3, i, 8); } if (laneIdx % 8 == 0) { // && (w1 != 0.0 || w2 != 0.0 || w3 != 0.0)){ const int tmp_index = (((global_threadIdx / WARPSIZE) % N_MULTI_WORK) * D_N_ATOM_ARRAY + a2) * 3; atomicAdd2(&(d_work[tmp_index + 0]), -w1); atomicAdd2(&(d_work[tmp_index + 1]), -w2); atomicAdd2(&(d_work[tmp_index + 2]), -w3); } } for (int i = 16; i >= 8; i /= 2) { work_c1[0] += shfl_xor(work_c1[0], i, 32); work_c1[1] += shfl_xor(work_c1[1], i, 32); work_c1[2] += shfl_xor(work_c1[2], i, 32); } if (laneIdx < 8) { const int tmp_index = ((ene_index_offset * D_N_ATOM_ARRAY) + a1) * 3; atomicAdd2(&(d_work[tmp_index + 0]), work_c1[0]); atomicAdd2(&(d_work[tmp_index + 1]), work_c1[1]); atomicAdd2(&(d_work[tmp_index + 2]), work_c1[2]); } for (int i = 16; i >= 1; i /= 2) { ene_vdw += shfl_xor(ene_vdw, i, 32); ene_ele += shfl_xor(ene_ele, i, 32); } if (laneIdx == 0) { const int tmp_index = ((global_threadIdx / 32) % N_MULTI_WORK) * 2; atomicAdd2(&(d_energy[tmp_index + 0]), ene_vdw); atomicAdd2(&(d_energy[tmp_index + 1]), ene_ele); } } __global__ void set_idx_head_cell_pairs(const int *d_n_cell_pairs, 
int *d_idx_head_cell_pairs) { // n_rep: the number of cells to be processed in each thread. /* dbg0415 int n_rep = (D_N_CELLS + REORDER_THREADS - 1) / REORDER_THREADS; for (int i = 0; i < n_rep; i++) { const int idx_head = REORDER_THREADS * i; const int idx_write = idx_head + threadIdx.x; if (idx_write < D_N_CELLS) { if (idx_write > 0) { const int idx = ((d_n_cell_pairs[idx_write - 1] + CP_PER_THREAD - 1) / CP_PER_THREAD) * CP_PER_THREAD; d_idx_head_cell_pairs[idx_write] = idx; } else { d_idx_head_cell_pairs[idx_write] = 0; } } for (int j = 1; j < REORDER_THREADS; j *= 2) { const int idx = (threadIdx.x / j); if (idx_write < D_N_CELLS && idx % 2 == 1) { d_idx_head_cell_pairs[idx_write] += d_idx_head_cell_pairs[idx_head + idx * j - 1]; } __syncthreads(); } if (i > 0) { d_idx_head_cell_pairs[idx_write] += d_idx_head_cell_pairs[idx_head - 1]; } __syncthreads(); } if (threadIdx.x == 0) { const int idx = ((d_n_cell_pairs[D_N_CELLS - 1] + CP_PER_THREAD - 1) / CP_PER_THREAD) * CP_PER_THREAD; d_idx_head_cell_pairs[D_N_CELLS] = d_idx_head_cell_pairs[D_N_CELLS - 1] + idx; } */ if (threadIdx.x == 0) { //const int idx = ((d_n_cell_pairs[D_N_CELLS - 1] + CP_PER_THREAD - 1) / CP_PER_THREAD) * CP_PER_THREAD; //d_idx_head_cell_pairs[D_N_CELLS] = d_idx_head_cell_pairs[D_N_CELLS - 1] + idx; d_idx_head_cell_pairs[0] = 0; for(int i=1; i <= D_N_CELLS; i++){ d_idx_head_cell_pairs[i] = d_idx_head_cell_pairs[i-1] + d_n_cell_pairs[i-1]; //printf("n:%d head:%d\n", //d_n_cell_pairs[i] , d_idx_head_cell_pairs[i]); } } // printf("max cp: %d\n",idx_cp); } __global__ void pack_cellpairs_array(CellPair * d_cell_pairs, const CellPair *d_cell_pairs_buf, const int * d_n_cell_pairs, const int * d_idx_head_cell_pairs) { const int cp = blockDim.x * blockIdx.x + threadIdx.x; if (cp >= D_MAX_N_CELL_PAIRS) return; //if (cp >= D_N_CELL_PAIRS) return; const CellPair cellpair = d_cell_pairs_buf[cp]; //printf("dbg0414b cp:%d %d %d\n",cp,cellpair.cell_id1,cellpair.cell_id2); if (cellpair.cell_id1 < 0 || cellpair.cell_id2 < 0 || cellpair.cell_id1 >= D_N_CELLS || cellpair.cell_id2 >= D_N_CELLS) { return; } const int cp_in_cell1 = cp - cellpair.cell_id1 * D_MAX_N_CELL_PAIRS_PER_CELL; if (cp_in_cell1 >= d_n_cell_pairs[cellpair.cell_id1]) { printf("Error: cp_in_cell1:%d d_n_cell_pairs:%d cp:%d c1:%d head:%d\n", cp_in_cell1, d_n_cell_pairs[cellpair.cell_id1], cp, cellpair.cell_id1, d_idx_head_cell_pairs[cellpair.cell_id1]); return; } const int dest = d_idx_head_cell_pairs[cellpair.cell_id1] + cp_in_cell1; if (dest < cellpair.cell_id1 || dest >= D_MAX_N_CELL_PAIRS){ printf("!!?? 
dest: %d (%d-%d) cp_in_cell:%d head:%d\n", dest, cellpair.cell_id1, cellpair.cell_id2, cp_in_cell1, d_idx_head_cell_pairs[cellpair.cell_id1]); } d_cell_pairs[dest] = cellpair; // if(cellpair.cell_id1 == 2 || cellpair.cell_id1 ==3 ){ // printf("dbg0414kernelpack cp:%d c1:%d c2:%d incell:%d dest:%d head:%d\n", cp, cellpair.cell_id1,cellpair.cell_id2, cp_in_cell1, dest, d_idx_head_cell_pairs[cellpair.cell_id1]); // } } __global__ void kernel_reset_cellpairs(CellPair *d_cell_pairs, int *d_n_cell_pairs, const int n_cells) { const int cell1_id = blockDim.x * blockIdx.x + threadIdx.x; if (cell1_id >= n_cells) { return; } int n_cp1 = d_n_cell_pairs[cell1_id]; for (int cell2 = 0; cell2 < n_cp1; cell2++) { bool flg = true; int n_mask_int = (N_ATOM_CELL * N_ATOM_CELL + 31) / 32; const int cp = D_MAX_N_CELL_PAIRS_PER_CELL * cell1_id + cell2; for (int i = 0; i < n_mask_int; i++) flg &= (d_cell_pairs[cp].pair_mask[i] == ~0); if (flg) { d_n_cell_pairs[cell1_id]--; int cp_src = D_MAX_N_CELL_PAIRS_PER_CELL * cell1_id + --n_cp1; d_cell_pairs[cp] = d_cell_pairs[cp_src]; } } d_n_cell_pairs[cell1_id] = n_cp1; } extern "C" int cuda_pairwise_ljzd(const bool flg_mod_15mask) { HANDLE_ERROR(cudaMemset(d_energy, 0.0, sizeof(real_fc) * 2 * N_MULTI_WORK)); HANDLE_ERROR(cudaMemset(d_work, 0.0, sizeof(real_fc) * max_n_atom_array * 3 * N_MULTI_WORK)); cudaStreamCreate(&stream_pair_home); const int blocks = (n_cells + PW_THREADS / 32 - 1) / (PW_THREADS / 32); kernel_pairwise_ljzd<<<blocks, PW_THREADS, 0, stream_pair_home>>>(d_crd_chg, d_cell_pairs, d_idx_head_cell_pairs, d_atomtype, d_lj_6term, d_lj_12term, d_energy, d_work); // if(flg_mod_15mask){ // const int blocks2 = (n_cal_cells+PW_THREADS-1) / PW_THREADS; // kernel_reset_cellpairs<<<blocks2, PW_THREADS, 0, stream_pair_home>>> //(d_cell_pairs, d_n_cell_pairs, n_cal_cells); //} return 0; } extern "C" int cuda_thread_sync() { cudaThreadSynchronize(); return 0; } extern "C" int cuda_pair_sync() { cudaStreamSynchronize(stream_pair_home); cudaStreamDestroy(stream_pair_home); return 0; } extern "C" int cuda_memcpy_dtoh_work(real_fc *&h_work, real_fc *&h_energy, int n_atoms, int n_atom_array) { // printf("! 
cuda_memcpy_dtoh_work\n"); int blocks = (n_atom_array + REORDER_THREADS - 1) / REORDER_THREADS; // printf("kernel_set_work_orig\n"); if(N_MULTI_WORK > 1) { cudaStream_t stream_reduction1; cudaStream_t stream_reduction2; cudaStreamCreate(&stream_reduction1); cudaStreamCreate(&stream_reduction2); kernel_set_work_orig<<<blocks, REORDER_THREADS, 0, stream_reduction1>>>(d_work, d_atomids); kernel_reduction_energy<<<1, REORDER_THREADS, 0, stream_reduction2>>>(d_energy); cudaStreamSynchronize(stream_reduction1); cudaStreamSynchronize(stream_reduction2); cudaStreamDestroy(stream_reduction1); cudaStreamDestroy(stream_reduction2); } HANDLE_ERROR(cudaMemcpy(h_work, d_work, sizeof(real_fc) * n_atom_array * 3, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(h_energy, d_energy, sizeof(real_fc) * 2, cudaMemcpyDeviceToHost)); //printf("cuda ene %f %f\n",h_energy[0], h_energy[1]); return 0; } int cuda_reset_work_ene() { HANDLE_ERROR(cudaMemset(d_work, 0.0, sizeof(real_fc) * max_n_atom_array * 3 * N_MULTI_WORK)); HANDLE_ERROR(cudaMemset(d_energy, 0.0, sizeof(real_fc) * 2 * N_MULTI_WORK)); return 0; } __device__ int get_column_id_from_crd(const int x, const int y) { return y * D_N_CELLS_XYZ[0] + x; } __device__ bool check_valid_pair(const int cell1_id, const int cell2_id) { const bool cell1_odd = cell1_id % 2 != 0; const bool cell2_odd = cell2_id % 2 != 0; if (cell1_odd) { if ((cell2_id < cell1_id && !cell2_odd) || (cell2_id > cell1_id && cell2_odd)) return false; } else { if ((cell2_id < cell1_id && cell2_odd) || (cell2_id > cell1_id && !cell2_odd)) return false; } return true; } __device__ int set_cell_pair_bitmask(const int cell_id1, const int cell_id2, const int cell1_id_in_block, const int *s_nb15off, const int n_atom_cell2, int * pair_mask) { for (int i = 0; i < N_BITMASK; i++) pair_mask[i] = 0; int a1 = N_ATOM_CELL * cell_id1; for (int a1_cell = 0; a1_cell < N_ATOM_CELL; a1++, a1_cell++) { bool flg1 = false; if (s_nb15off[(cell1_id_in_block * N_ATOM_CELL + a1_cell) * D_MAX_N_NB15OFF] == a1) flg1 = true; int a2 = N_ATOM_CELL * cell_id2; for (int a2_cell = 0; a2_cell < N_ATOM_CELL; a2++, a2_cell++) { const int bit_pos = a2_cell * N_ATOM_CELL + a1_cell; const int mask_id = bit_pos / 32; const int mask_pos = bit_pos % 32; const int add_bit = 1 << mask_pos; bool flg12 = false; if (flg1) flg12 = true; if (a2_cell >= n_atom_cell2) flg12 = true; else if ((cell_id1 == cell_id2 && a1 >= a2)) flg12 = true; else { const int tail = (cell1_id_in_block * N_ATOM_CELL + a1_cell + 1) * D_MAX_N_NB15OFF; for (int i = tail - D_MAX_N_NB15OFF; i < tail && s_nb15off[i] != -1; i++) { if (s_nb15off[i] == a2) { flg12 = true; break; } } } if (flg12) { pair_mask[mask_id] |= add_bit; } } } return 0; } __device__ CellPair get_new_cell_pair(const int cell1_id, const int cell2_id, const int image[3]) { CellPair new_cp; new_cp.cell_id1 = cell1_id; new_cp.cell_id2 = cell2_id; int bit_image = 0; if (image[0] == -1) bit_image = bit_image | 1; else if (image[0] == 1) bit_image = bit_image | 2; if (image[1] == -1) bit_image = bit_image | 4; else if (image[1] == 1) bit_image = bit_image | 8; if (image[2] == -1) bit_image = bit_image | 16; else if (image[2] == 1) bit_image = bit_image | 32; new_cp.image = bit_image; new_cp.pair_mask[0] = ~0; new_cp.pair_mask[1] = ~0; return new_cp; } __global__ void kernel_enumerate_cell_pair(const real4 *d_crd_chg, const real2 *d_cell_z, const int * d_idx_xy_head_cell, const int * d_idx_cell_column, const int * d_atomids, const int * d_nb15off_orig, const int * d_nb15off, int * d_n_cell_pairs, CellPair * 
d_cell_pairs) { // 1 warp calculates pairs with a cell const int g_thread_id = (threadIdx.x + blockIdx.x * blockDim.x); const int cell1_id = g_thread_id / D_N_NEIGHBOR_COL; if (cell1_id >= D_N_CELLS) return; const int neighbor_col_id = g_thread_id % D_N_NEIGHBOR_COL; int cell1_crd[3]; const int col1_id = d_idx_cell_column[cell1_id]; cell1_crd[0] = col1_id % D_N_CELLS_XYZ[0]; cell1_crd[1] = col1_id / D_N_CELLS_XYZ[0]; cell1_crd[2] = cell1_id - d_idx_xy_head_cell[col1_id]; const real4 crd_chg11 = d_crd_chg[cell1_id * N_ATOM_CELL]; const real_pw cell1_z_bottom = crd_chg11.z; // const real_pw cell1_z_top = d_cell_z[cell1_id].y; const real_pw cell1_z_top = d_crd_chg[(cell1_id + 1) * N_ATOM_CELL - 1].z; int image[3] = {0, 0, 0}; const int idx_cell_pair_head = D_MAX_N_CELL_PAIRS_PER_CELL * cell1_id; int d_cell[3]; d_cell[0] = neighbor_col_id % (D_N_NEIGHBOR_XYZ[0] * 2 + 1) - D_N_NEIGHBOR_XYZ[0]; d_cell[1] = neighbor_col_id / (D_N_NEIGHBOR_XYZ[0] * 2 + 1) - D_N_NEIGHBOR_XYZ[1]; int rel_x[3]; int cell2_crd[3]; real_pw dx[3] = {0.0, 0.0, 0.0}; for (int d = 0; d < 2; d++) { image[d] = 0; rel_x[d] = cell1_crd[d] + d_cell[d]; cell2_crd[d] = rel_x[d]; if (rel_x[d] < 0) { image[d] = -1; cell2_crd[d] = D_N_CELLS_XYZ[d] + rel_x[d]; } else if (rel_x[d] >= D_N_CELLS_XYZ[d]) { image[d] = 1; cell2_crd[d] = rel_x[d] - D_N_CELLS_XYZ[d]; } if (d_cell[d] > 0) dx[d] = (d_cell[d] - 1) * D_L_CELL_XYZ[d]; else if (d_cell[d] < 0) dx[d] = (d_cell[d] + 1) * D_L_CELL_XYZ[d]; dx[d] = dx[d] * dx[d]; } /* 20150612-- */ const int col2_id = cell2_crd[0] + cell2_crd[1] * D_N_CELLS_XYZ[0]; const int n_cells_in_col2 = d_idx_xy_head_cell[col2_id + 1] - d_idx_xy_head_cell[col2_id]; int inc = 1; bool flg_up = true; bool flg_down = true; for (d_cell[2] = 0; flg_up || flg_down; d_cell[2] += inc) { rel_x[2] = cell1_crd[2] + d_cell[2]; image[2] = 0; cell2_crd[2] = rel_x[2]; if (rel_x[2] < 0) { image[2] = -1; cell2_crd[2] += n_cells_in_col2; } else if (rel_x[2] >= n_cells_in_col2) { image[2] = 1; cell2_crd[2] -= n_cells_in_col2; } const int cell2_id = d_idx_xy_head_cell[col2_id] + cell2_crd[2]; // const real_pw cell2_z_bottom = d_cell_z[cell2_id].x + image[2] * PBC_L[2]; // const real_pw cell2_z_top = d_cell_z[cell2_id].y + image[2] * PBC_L[2]; const real_pw cell2_z_bottom = d_crd_chg[cell2_id * N_ATOM_CELL].z + image[2] * PBC_L[2]; const real_pw cell2_z_top = d_crd_chg[(cell2_id + 1) * N_ATOM_CELL - 1].z + image[2] * PBC_L[2]; if (cell2_z_top < cell1_z_bottom) { dx[2] = cell1_z_bottom - cell2_z_top; dx[2] = dx[2] * dx[2]; if (inc == -1 && dx[0] + dx[1] + dx[2] > D_CUTOFF_PAIRLIST_2) { flg_down = false; } } else if (cell2_z_bottom > cell1_z_top) { dx[2] = cell2_z_bottom - cell1_z_top; dx[2] = dx[2] * dx[2]; if (inc == 1 && dx[0] + dx[1] + dx[2] > D_CUTOFF_PAIRLIST_2) { d_cell[2] = 0; inc = -1; flg_up = false; } } else { dx[2] = 0.0; } if (dx[0] + dx[1] + dx[2] < D_CUTOFF_PAIRLIST_2) { if (check_valid_pair(cell1_id, cell2_id)) { const int cp_idx_cell = atomicAdd(&d_n_cell_pairs[cell1_id], 1); if (cp_idx_cell >= D_MAX_N_CELL_PAIRS_PER_CELL) { printf("Index exceeds the maximum value. 
%d / %d\n", cp_idx_cell, D_MAX_N_CELL_PAIRS_PER_CELL); } d_cell_pairs[idx_cell_pair_head + cp_idx_cell] = get_new_cell_pair(cell1_id, cell2_id, image); } } } } __global__ void kernel_init_cell_pairs(CellPair *d_cell_pairs, CellPair *d_cell_pairs_buf) { const int cp_id = threadIdx.x + blockDim.x * blockIdx.x; if (cp_id >= D_MAX_N_CELL_PAIRS) return; CellPair new_cp; new_cp.cell_id1 = -1; new_cp.cell_id2 = -1; new_cp.image = 0; for (int i = 0; i < N_BITMASK; i++) { new_cp.pair_mask[i] = ~0; } d_cell_pairs[cp_id] = new_cp; d_cell_pairs_buf[cp_id] = new_cp; } __global__ void set_cell_pairs_nb15off(const int *d_idx_head_cell_pairs, const int *d_nb15off, const int *d_atomids, const int n_cell_pairs, CellPair * d_cell_pairs) { const int cp_block_first = blockIdx.x * blockDim.x; const int cp = threadIdx.x + cp_block_first; if (cp >= n_cell_pairs) return; //printf("dbg0415a cp : %d cp_block_first : %d blockIdx: %d blockDim: %d threadIdx : %d\n",cp, cp_block_first,blockIdx.x, blockDim.x, threadIdx.x); const int cell1_id = d_cell_pairs[cp].cell_id1; const int cell2_id = d_cell_pairs[cp].cell_id2; if (cell1_id < 0 || cell2_id < 0) return; const int cell1_id_in_block = cell1_id - d_cell_pairs[cp_block_first].cell_id1; //if(cell1_id_in_block < 0){ //printf("dbg0415a2 %d %d %d %d t:%d bid:%d bdim:%d\n",cell1_id_in_block, cell1_id, cp_block_first, d_cell_pairs[cp_block_first].cell_id1, threadIdx.x, blockIdx.x, blockDim.x); //} if(cp_block_first >= n_cell_pairs){ printf("ERROR! cp_block_first >= n_cell_pairs: \n %d %d\n",cp_block_first, n_cell_pairs); } //printf("val1: %d val2: %d\n", cell1_id * N_ATOM_CELL * MAX_N_NB15OFF + i, cp_block_first) if (cell1_id_in_block >= MAX_N_CELL_BLOCK) { printf("The number of cells in each block exceeds the constant MAX_N_CELL_BLOCK: %d / %d\ncell: %d %d - %d " "cp_block_first:%d " "c1_in_first:%d\n", cell1_id_in_block, MAX_N_CELL_BLOCK, cp, cell1_id, cell2_id, cp_block_first, d_cell_pairs[cp_block_first].cell_id1); } __shared__ int s_nb15off[MAX_N_CELL_BLOCK * N_ATOM_CELL * MAX_N_NB15OFF]; //printf("dbg0415b %d %d %d %d %d %d\n", threadIdx.x, cp, cell1_id, //cell1_id_in_block * N_ATOM_CELL * MAX_N_NB15OFF, //MAX_N_CELL_BLOCK * N_ATOM_CELL * MAX_N_NB15OFF, //cell1_id * N_ATOM_CELL * MAX_N_NB15OFF); if (threadIdx.x == 0 || d_cell_pairs[cp - 1].cell_id1 != cell1_id) { for (int i = 0; i < N_ATOM_CELL * MAX_N_NB15OFF; i++) { s_nb15off[cell1_id_in_block * N_ATOM_CELL * MAX_N_NB15OFF + i] = d_nb15off[cell1_id * N_ATOM_CELL * MAX_N_NB15OFF + i]; } //printf("dbg0415c d_nb15off %d %d\n",cell1_id * N_ATOM_CELL * MAX_N_NB15OFF, //cell1_id * N_ATOM_CELL * MAX_N_NB15OFF + N_ATOM_CELL*MAX_N_NB15OFF-1); } __syncthreads(); int n_atom_cell2 = 0; const int tail = (cell2_id + 1) * N_ATOM_CELL; for (int at2 = tail - N_ATOM_CELL; at2 < tail; at2++) { if (d_atomids[at2] >= 0) n_atom_cell2++; else break; } set_cell_pair_bitmask(cell1_id, cell2_id, cell1_id_in_block, s_nb15off, n_atom_cell2, d_cell_pairs[cp].pair_mask); } int set_idx_cell_column(const int *idx_atom_cell_xy, const int n_cells, const int *h_atomids) { for (int cell_id = 0; cell_id < n_cells; cell_id++) { h_idx_cell_column[cell_id] = idx_atom_cell_xy[cell_id * N_ATOM_CELL]; // printf("cell: %d col: %d atid: %d \n", // cell_id, h_idx_cell_column[cell_id], // h_atomids[cell_id*N_ATOM_CELL]); } return 0; } extern "C" int cuda_enumerate_cell_pairs(int *& h_atomids, const int n_cells, // const int n_uni, const int n_neighbor_col, const int *idx_atom_cell_xy) { // printf("dbg0413 cuda_enumerate_cell_pairs_01\n"); // cudaMemoryTest(); 
set_idx_cell_column(idx_atom_cell_xy, n_cells, h_atomids); // printf("dbg0413 cuda_enumerate_cell_pairs_02\n"); // cudaMemoryTest(); HANDLE_ERROR(cudaMemcpy(d_idx_cell_column, h_idx_cell_column, n_cells * sizeof(int), cudaMemcpyHostToDevice)); // printf("dbg0413 cuda_enumerate_cell_pairs_03\n"); // cudaMemoryTest(); //cudaStream_t stream1; // cudaStream_t stream2; //cudaStreamCreate(&stream1); // cudaStreamCreate(&stream2); // HANDLE_ERROR( cudaMemset(d_cell_pairs, -1, sizeof(int)*5*D_MAX_N_CELL_PAIRS)); // HANDLE_ERROR( cudaMemset(d_cell_pairs_buf, -1, sizeof(int)*5*D_MAX_N_CELL_PAIRS)); const int blocks3 = (max_n_cell_pairs + REORDER_THREADS - 1) / REORDER_THREADS; kernel_init_cell_pairs<<<blocks3, REORDER_THREADS>>>(d_cell_pairs, d_cell_pairs_buf); cudaThreadSynchronize(); // printf("dbg0413 cuda_enumerate_cell_pairs_04\n"); //cudaMemoryTest(); const int blocks4 = (n_neighbor_col * n_cells + REORDER_THREADS - 1) / REORDER_THREADS; // printf("bbb %d\n", max_n_cell_pairs); kernel_enumerate_cell_pair<<<blocks4, REORDER_THREADS>>>( // d_uni2cell_z, d_crd_chg, d_cell_z, d_idx_xy_head_cell, d_idx_cell_column, d_atomids, d_nb15off_orig, d_nb15off, d_n_cell_pairs, d_cell_pairs_buf); // d_cell_pairs); //printf("dbg0413 cuda_enumerate_cell_pairs_05\n"); // cudaMemoryTest(); cudaThreadSynchronize(); set_idx_head_cell_pairs<<<1, REORDER_THREADS>>>(d_n_cell_pairs, d_idx_head_cell_pairs); cudaThreadSynchronize(); // n_cell_pairs = d_idx_head_cell_pairs[n_cells+1]; HANDLE_ERROR(cudaMemcpy(&n_cell_pairs, &d_idx_head_cell_pairs[n_cells], sizeof(int), cudaMemcpyDeviceToHost)); // printf("dbg0413 cuda_enumerate_cell_pairs_06\n"); //cudaMemoryTest(); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_CELL_PAIRS, &n_cell_pairs, sizeof(int))); //printf("dbg0413 cuda_enumerate_cell_pairs_07\n"); //cudaMemoryTest(); pack_cellpairs_array<<<blocks3, REORDER_THREADS>>>(d_cell_pairs, d_cell_pairs_buf, d_n_cell_pairs, d_idx_head_cell_pairs); cudaThreadSynchronize(); //printf("dbg0413 cuda_enumerate_cell_pairs_08\n"); //cudaMemoryTest(); // debug code //int* h_n_cell_pairs; //HANDLE_ERROR(cudaHostAlloc((void **)&h_n_cell_pairs, max_n_cells * sizeof(int), cudaHostAllocDefault)); //HANDLE_ERROR(cudaMemcpy(h_n_cell_pairs, d_n_cell_pairs, max_n_cells * sizeof(int), cudaMemcpyDeviceToHost)); // for(int i_ncp=0; i_ncp < max_n_cells; i_ncp++){ //printf("dbg0414ncp cp %d: %d\n", i_ncp, h_n_cell_pairs[i_ncp]); //} //int* h_idx_head; //HANDLE_ERROR(cudaHostAlloc((void **)&h_idx_head, (max_n_cells+1) * sizeof(int), cudaHostAllocDefault)); //HANDLE_ERROR(cudaMemcpy(h_idx_head, d_idx_head_cell_pairs, (max_n_cells+1) * sizeof(int), cudaMemcpyDeviceToHost)); //for(int i_ncp=0; i_ncp < max_n_cells+1; i_ncp++){ //printf("dbg0414headidx %d: %d\n", i_ncp, h_idx_head[i_ncp]); //} //dbg0420 /* for(int i_ncp=1; i_ncp < n_cells+1; i_ncp++){ if(h_idx_head[i_ncp] <= h_idx_head[i_ncp-1]){ for(int j_ncp=1; j_ncp < max_n_cells+1; j_ncp++){ printf("dbg0414headidx %d: %d ", j_ncp, h_idx_head[j_ncp]); if (i_ncp == j_ncp){ printf(" *here*"); } printf("\n"); } for(int j_ncp=0; j_ncp < max_n_cells; j_ncp++){ printf("dbg0414ncp cp %d: %d\n", j_ncp, h_n_cell_pairs[j_ncp]); } break; } } */ //HANDLE_ERROR(cudaFreeHost(h_n_cell_pairs)); //HANDLE_ERROR(cudaFreeHost(h_idx_head)); //CellPair* h_cp; //HANDLE_ERROR(cudaHostAlloc((void **)&h_cp, n_cell_pairs * sizeof(CellPair), cudaHostAllocDefault)); //HANDLE_ERROR(cudaMemcpy(h_cp, d_cell_pairs_buf, n_cell_pairs * sizeof(CellPair), cudaMemcpyDeviceToHost)); //for(int i_cp=0; i_cp < n_cell_pairs+10; i_cp++){ 
//printf("dbg0414a cp %d: %d %d\n", i_cp, h_cp[i_cp].cell_id1, h_cp[i_cp].cell_id2); //} //HANDLE_ERROR(cudaMemcpy(h_cp, d_cell_pairs, n_cell_pairs * sizeof(CellPair), cudaMemcpyDeviceToHost)); //for(int i_cp=0; i_cp < n_cell_pairs+10; i_cp++){ //printf("dbg0414b cp %d: %d %d\n", i_cp, h_cp[i_cp].cell_id1, h_cp[i_cp].cell_id2); //} //HANDLE_ERROR(cudaFreeHost(h_cp)); /// ! debug code const int blocks5 = (n_cell_pairs + REORDER_THREADS - 1) / REORDER_THREADS; set_cell_pairs_nb15off<<<blocks5, REORDER_THREADS>>>(d_idx_head_cell_pairs, d_nb15off, d_atomids, n_cell_pairs, d_cell_pairs); cudaThreadSynchronize(); //printf("dbg0413 cuda_enumerate_cell_pairs_09\n"); //cudaMemoryTest(); return 0; } extern "C" int cuda_memcpy_htod_cell_pairs(CellPair *&h_cell_pairs, int *&h_idx_head_cell_pairs, int n_cell_pairs) { // printf("cuda_memcpy_htod_cell_pairs\n"); HANDLE_ERROR(cudaMemcpy(d_cell_pairs, h_cell_pairs, n_cell_pairs * sizeof(CellPair), cudaMemcpyHostToDevice)); // HANDLE_ERROR( cudaMemset(d_idx_head_cell_pairs, -1, sizeof(int)*(max_n_cells+1)); HANDLE_ERROR( cudaMemcpy(d_idx_head_cell_pairs, h_idx_head_cell_pairs, (n_cells + 1) * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpyToSymbol(D_N_CELL_PAIRS, &n_cell_pairs, sizeof(int))); return 0; } extern "C" int cuda_alloc_set_hps_params(real_pw* h_hps_cutoff, real_pw* h_hps_lambda, int n_lj_types){ // printf("threads : %d\n", PW_THREADS); printf("cuda_alloc_set_hps_params\n"); const unsigned int size_lj_matrix = sizeof(real_pw) * n_lj_types * n_lj_types; // cudaMalloc HANDLE_ERROR(cudaMalloc((void **)&d_hps_cutoff, size_lj_matrix)); HANDLE_ERROR(cudaMalloc((void **)&d_hps_lambda, size_lj_matrix)); HANDLE_ERROR(cudaMemcpy(d_hps_cutoff, h_hps_cutoff, size_lj_matrix, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_hps_lambda, h_hps_lambda, size_lj_matrix, cudaMemcpyHostToDevice)); //printf("cudadbg %d\n", d_hps_lambda[0]); return 0; } extern "C" int cuda_free_hps_params() { // printf("cuda_free_lj_param\n"); // cudaUnbindTexture(tex_lj_6term); // cudaUnbindTexture(tex_lj_12term); HANDLE_ERROR(cudaFree(d_hps_cutoff)); HANDLE_ERROR(cudaFree(d_hps_lambda)); return 0; } extern "C" int cuda_hps_constant(real_pw hps_eps){ printf("set_cuda_hps_constant\n"); HANDLE_ERROR(cudaMemcpyToSymbol(D_HPS_EPS, &hps_eps, sizeof(real_pw))); return 0; } extern "C" int cuda_debye_huckel_constant(real_pw in_dielect, real_pw in_temperature, real_pw in_ionic_strength){ printf("cuda_debye_huckel_constant (cuda)\n"); debye_length_inv = (1.0/(sqrt(PERMITTIVITY*in_dielect*BOLTZMAN*in_temperature/(2*AVOGADRO*ELEM_CHARGE*ELEM_CHARGE*in_ionic_strength))*1e10)); dielect_inv = 1.0 / in_dielect; HANDLE_ERROR(cudaMemcpyToSymbol(D_DEBYE_LEN_INV, &debye_length_inv, sizeof(real_pw))); printf("cuda debye_len_inv : %f\n", debye_length_inv); HANDLE_ERROR(cudaMemcpyToSymbol(D_DIELECT_INV, &dielect_inv, sizeof(real_pw))); printf("cuda dielect_inv : %f\n", dielect_inv); return 0; } __device__ real_pw cal_pair_hps_dh(real_pw & w1, real_pw & w2, real_pw & w3, real_pw & ene_vdw, real_pw & ene_ele, const real4 &crd_chg1, const real4 &crd_chg2, const int & atomtype1, const int & atomtype2, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term, const real_pw *__restrict__ d_hps_cutoff, const real_pw *__restrict__ d_hps_lambda){ // const real_pw d12[3] = {crd_chg1.x - crd_chg2.x, crd_chg1.y - crd_chg2.y, crd_chg1.z - crd_chg2.z}; //for(int d=0; d < 3; d++){ real_pw tmp_d12 = crd_chg1.z - crd_chg2.z; tmp_d12 -= nearbyint(tmp_d12 * PBC_L_INV[2]) * PBC_L[2]; 
const real_pw d12[3] = {crd_chg1.x - crd_chg2.x, crd_chg1.y - crd_chg2.y, tmp_d12}; //d12[d] -= static_cast<int>(d12[d] * 1.0/PBC_L[d] + 0.5) * PBC_L[d]; //} const real_pw r12_2 = d12[0] * d12[0] + d12[1] * d12[1] + d12[2] * d12[2]; const real_pw r12 = sqrt(r12_2); if (r12 >= D_CUTOFF) { return r12; } const real_pw r12_inv = 1.0 / r12; const real_pw r12_2_inv = r12_inv * r12_inv; const real_pw r12_3_inv = r12_inv * r12_2_inv; const real_pw r12_6_inv = r12_3_inv * r12_3_inv; const real_pw r12_12_inv = r12_6_inv * r12_6_inv; const int pairtype = atomtype1 * D_N_ATOMTYPES + atomtype2; const real_pw hps_cutoff = d_hps_cutoff[pairtype]; const real_pw hps_lambda = d_hps_lambda[pairtype]; const real_pw term6 = d_lj_6term[pairtype] * r12_6_inv; const real_pw term12 = d_lj_12term[pairtype] * r12_12_inv; real_pw work_coef = r12_2_inv * (-12.0 * term12 + 6.0 * term6); ene_vdw = (-term6 + term12); if(r12 > hps_cutoff){ ene_vdw *= hps_lambda; work_coef *= hps_lambda; }else{ ene_vdw += (1-hps_lambda) * D_HPS_EPS; } const real_pw cc = crd_chg1.w * crd_chg2.w * D_CHARGE_COEFF; const real_pw r12_ld_exp = exp(-r12 * D_DEBYE_LEN_INV); ene_ele = cc * r12_inv * D_DIELECT_INV * r12_ld_exp; work_coef -= ene_ele * (r12_inv + D_DEBYE_LEN_INV); w1 = (work_coef)*d12[0]; w2 = (work_coef)*d12[1]; w3 = (work_coef)*d12[2]; return r12; } __global__ void kernel_pairwise_hps_dh(const real4 *d_crd_chg, CellPair * d_cell_pairs, const int * d_idx_head_cell_pairs, const int * d_atomtype, const real_pw *__restrict__ d_lj_6term, const real_pw *__restrict__ d_lj_12term, const real_pw *__restrict__ d_hps_cutoff, const real_pw *__restrict__ d_hps_lambda, real_fc *d_energy, real_fc *d_work) { // const bool flg_mod_15mask){ real_fc ene_vdw = 0.0; real_fc ene_ele = 0.0; const int global_threadIdx = blockDim.x * blockIdx.x + threadIdx.x; const int c1 = global_threadIdx >> 5; const int warpIdx = threadIdx.x >> 5; if (c1 >= D_N_CELLS) { return; } const int laneIdx = global_threadIdx & 31; const int n_loops = (d_idx_head_cell_pairs[c1 + 1] - d_idx_head_cell_pairs[c1]) * 2; const int ene_index_offset = global_threadIdx % N_MULTI_WORK; real_fc work_c1[3] = {0.0, 0.0, 0.0}; const int atom_idx1 = (laneIdx & 7); // laneIdx%8 const int a1 = c1 * N_ATOM_CELL + atom_idx1; __shared__ real4 crd_chg1[N_ATOM_CELL * (PW_THREADS >> 5)]; __shared__ int atomtype1[N_ATOM_CELL * (PW_THREADS >> 5)]; const int sharedmem_idx = N_ATOM_CELL * warpIdx + atom_idx1; if (laneIdx < N_ATOM_CELL) { crd_chg1[sharedmem_idx] = d_crd_chg[c1 * N_ATOM_CELL + laneIdx]; atomtype1[sharedmem_idx] = d_atomtype[c1 * N_ATOM_CELL + laneIdx]; } __syncthreads(); CellPair cellpair; int cp; for (int loopIdx = 0; loopIdx < n_loops; loopIdx++) { if (loopIdx % 2 == 0) { if (laneIdx == 0) { cp = d_idx_head_cell_pairs[c1] + (loopIdx >> 1); if (cp >= D_N_CELL_PAIRS) break; cellpair = d_cell_pairs[cp]; } cp = __shfl(cp, 0); cellpair.cell_id1 = __shfl(cellpair.cell_id1, 0); cellpair.cell_id2 = __shfl(cellpair.cell_id2, 0); cellpair.image = __shfl(cellpair.image, 0); cellpair.pair_mask[0] = __shfl(cellpair.pair_mask[0], 0); cellpair.pair_mask[1] = __shfl(cellpair.pair_mask[1], 0); } if (cellpair.cell_id1 != c1) break; const int c2 = cellpair.cell_id2; // atom_idx ... 
index in cell, 0-7 const int atom_idx2 = (laneIdx >> 3) + 4 * (loopIdx % 2); // laneIdx/8 + 4*(warpIdx%2) // remove 1-2, 1-3, 1-4 pairs const int a2 = c2 * N_ATOM_CELL + atom_idx2; real4 crd_chg2; int atomtype2; if (atom_idx1 == 0) { crd_chg2 = d_crd_chg[a2]; atomtype2 = d_atomtype[a2]; if ((cellpair.image & 1) == 1) crd_chg2.x -= PBC_L[0]; else if ((cellpair.image & 2) == 2) crd_chg2.x += PBC_L[0]; if ((cellpair.image & 4) == 4) crd_chg2.y -= PBC_L[1]; else if ((cellpair.image & 8) == 8) crd_chg2.y += PBC_L[1]; /* if ((cellpair.image & 16) == 16) crd_chg2.z -= PBC_L[2]; else if ((cellpair.image & 32) == 32) crd_chg2.z += PBC_L[2]; */ } int atomid2_top = laneIdx - laneIdx % 8; crd_chg2.x = __shfl(crd_chg2.x, laneIdx - atom_idx1); crd_chg2.y = __shfl(crd_chg2.y, laneIdx - atom_idx1); crd_chg2.z = __shfl(crd_chg2.z, laneIdx - atom_idx1); crd_chg2.w = __shfl(crd_chg2.w, laneIdx - atom_idx1); atomtype2 = __shfl(atomtype2, laneIdx - atom_idx1); real_pw w1 = 0.0, w2 = 0.0, w3 = 0.0; real_pw cur_ene_ele = 0.0; real_pw cur_ene_vdw = 0.0; int mask_id; int interact_bit; if (!check_15off64(atom_idx1, atom_idx2, cellpair.pair_mask, mask_id, interact_bit)) { real_pw r12 = cal_pair_hps_dh(w1, w2, w3, cur_ene_vdw, cur_ene_ele, crd_chg1[sharedmem_idx], crd_chg2, atomtype1[sharedmem_idx], atomtype2, d_lj_6term, d_lj_12term, d_hps_cutoff, d_hps_lambda); ene_vdw += cur_ene_vdw; ene_ele += cur_ene_ele; work_c1[0] += w1; work_c1[1] += w2; work_c1[2] += w3; } for (int i = 4; i >= 1; i /= 2) { w1 += shfl_xor(w1, i, 8); w2 += shfl_xor(w2, i, 8); w3 += shfl_xor(w3, i, 8); } if (laneIdx % 8 == 0) { // && (w1 != 0.0 || w2 != 0.0 || w3 != 0.0)){ const int tmp_index = (((global_threadIdx / WARPSIZE) % N_MULTI_WORK) * D_N_ATOM_ARRAY + a2) * 3; atomicAdd2(&(d_work[tmp_index + 0]), -w1); atomicAdd2(&(d_work[tmp_index + 1]), -w2); atomicAdd2(&(d_work[tmp_index + 2]), -w3); } } for (int i = 16; i >= 8; i /= 2) { work_c1[0] += shfl_xor(work_c1[0], i, 32); work_c1[1] += shfl_xor(work_c1[1], i, 32); work_c1[2] += shfl_xor(work_c1[2], i, 32); } if (laneIdx < 8) { const int tmp_index = ((ene_index_offset * D_N_ATOM_ARRAY) + a1) * 3; atomicAdd2(&(d_work[tmp_index + 0]), work_c1[0]); atomicAdd2(&(d_work[tmp_index + 1]), work_c1[1]); atomicAdd2(&(d_work[tmp_index + 2]), work_c1[2]); } for (int i = 16; i >= 1; i /= 2) { ene_vdw += shfl_xor(ene_vdw, i, 32); ene_ele += shfl_xor(ene_ele, i, 32); } if (laneIdx == 0) { const int tmp_index = ((global_threadIdx / 32) % N_MULTI_WORK) * 2; atomicAdd2(&(d_energy[tmp_index + 0]), ene_vdw); atomicAdd2(&(d_energy[tmp_index + 1]), ene_ele); } } extern "C" int cuda_pairwise_hps_dh(const bool flg_mod_15mask) { HANDLE_ERROR(cudaMemset(d_energy, 0.0, sizeof(real_fc) * 2 * N_MULTI_WORK)); HANDLE_ERROR(cudaMemset(d_work, 0.0, sizeof(real_fc) * max_n_atom_array * 3 * N_MULTI_WORK)); cudaStreamCreate(&stream_pair_home); const int blocks = (n_cells + PW_THREADS / 32 - 1) / (PW_THREADS / 32); kernel_pairwise_hps_dh<<<blocks, PW_THREADS, 0, stream_pair_home>>>(d_crd_chg, d_cell_pairs, d_idx_head_cell_pairs, d_atomtype, d_lj_6term, d_lj_12term, d_hps_cutoff, d_hps_lambda, d_energy, d_work); cudaThreadSynchronize(); return 0; }
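The pairwise kernels above (kernel_pairwise_ljzd and kernel_pairwise_hps_dh) accumulate per-warp energies and forces with a butterfly shuffle reduction, e.g. for (int i = 16; i >= 1; i /= 2) ene_vdw += shfl_xor(ene_vdw, i, 32); here shfl_xor and atomicAdd2 appear to be project-defined helpers that are not shown in this excerpt. The stand-alone sketch below shows the same reduction pattern written with the standard __shfl_xor_sync intrinsic; it is an illustrative assumption about what those helpers do, not code taken from the file above.

// Minimal sketch (not one of the dataset files): butterfly warp reduction
// followed by one atomic per warp, the same accumulation shape used for
// ene_vdw / ene_ele in the kernels above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_sum(const float *in, float *out) {
  const unsigned FULL_MASK = 0xffffffffu;
  float v = in[threadIdx.x];
  // After log2(32) butterfly steps every lane holds the warp-wide sum.
  for (int offset = 16; offset >= 1; offset /= 2)
    v += __shfl_xor_sync(FULL_MASK, v, offset);
  if ((threadIdx.x & 31) == 0)  // lane 0 of the warp commits the result
    atomicAdd(out, v);
}

int main() {
  float h_in[32], h_sum = 0.0f;
  for (int i = 0; i < 32; ++i) h_in[i] = 1.0f;
  float *d_in, *d_out;
  cudaMalloc(&d_in, 32 * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, 32 * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, sizeof(float));
  warp_sum<<<1, 32>>>(d_in, d_out);
  cudaMemcpy(&h_sum, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("warp sum = %f (expected 32)\n", h_sum);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}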
0044566f6613ce03e985a4086bc8b67a11483880.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>

#define N 2048 * 2048 // Number of elements in each vector

__global__ void saxpy(int * a, int * b, int * c, int maxIndex)
{
  // Determine our unique global thread ID, so we know which element to process
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = tid; i < maxIndex; i += stride)
    c[i] = 2 * a[i] + b[i];
}

// init on gpu
__global__ void initWith(int value, int * a){
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = tid; i< N; i += stride)
    a[i] = value;
}

// check error
inline hipError_t cudaCheck(hipError_t result){
  if (result!=hipSuccess){
    fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
    assert(result==hipSuccess);
  }
  return result;
}

int main()
{
  int *a, *b, *c, *h_c;
  int size = N * sizeof (int); // The total number of bytes per vector

  int deviceId;
  int numberOfSMs;
  cudaCheck(hipGetDevice(&deviceId));
  cudaCheck(hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId));

  // Allocate memory
  cudaCheck(hipMalloc(&a, size)); // gpu only
  cudaCheck(hipMalloc(&b, size)); // gpu only
  cudaCheck(hipMalloc(&c, size)); // gpu only
  cudaCheck(hipHostMalloc(&h_c, size)); // pinned host memory

  int threads_per_block = 256;
  int number_of_blocks = numberOfSMs * 32;

  const int numberOfSegments = 15; // This example demonstrates slicing the work into 15 segments.
  int segmentN = N / numberOfSegments; // A value for a segment's worth of `N` is needed.

  // Initialize memory
  hipLaunchKernelGGL(( initWith) , dim3(number_of_blocks), dim3(threads_per_block), 0, 0, 2, a);
  hipLaunchKernelGGL(( initWith) , dim3(number_of_blocks), dim3(threads_per_block), 0, 0, 1, b);

  for (int i = 0; i < numberOfSegments; ++i){
    // Calculate the index where this particular segment should operate within the larger arrays.
    int offset = i * segmentN;

    // Create a stream for this segment's worth of copy and work.
    hipStream_t stream;//, stream_cpy;
    cudaCheck(hipStreamCreate(&stream));
    // cudaCheck(hipStreamCreate(&stream_cpy));
    hipEvent_t work;
    cudaCheck(hipEventCreate(&work));

    hipLaunchKernelGGL(( saxpy) , dim3(number_of_blocks), dim3(threads_per_block), 0, stream, &a[offset], &b[offset], &c[offset], segmentN);
    cudaCheck(hipEventRecord(work, stream));
    // `hipStreamDestroy` will return immediately (is non-blocking), but will not actually destroy stream until
    // all stream operations are complete.
    cudaCheck(hipStreamDestroy(stream));

    // cudaCheck(hipStreamWaitEvent(stream_cpy, work, 0));
    // cudaCheck(hipMemcpyAsync(&h_c[offset], &c[offset], size/segmentN, hipMemcpyDeviceToHost, stream_cpy));
    // cudaCheck(hipStreamDestroy(stream_cpy));
  }

  cudaCheck(hipGetLastError());
  cudaCheck(hipDeviceSynchronize()); // Wait for the GPU to finish

  cudaCheck(hipMemcpy(h_c, c, size, hipMemcpyDeviceToHost));

  // Print out the first and last 5 values of c for a quality check
  for( int i = 0; i < 5; ++i )
    printf("h_c[%d] = %d, ", i, h_c[i]);
  printf ("\n");

  for( int i = N-5; i < N; ++i )
    printf("h_c[%d] = %d, ", i, h_c[i]);
  printf ("\n");

  // Free all our allocated memory
  cudaCheck(hipFree(a));
  cudaCheck(hipFree(b));
  cudaCheck(hipFree(c));
  cudaCheck(hipHostFree(h_c));
}
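Comparing this hipified file with the CUDA original that follows shows the mechanical renames hipify performs; the correspondences below are read directly off the two files and introduce no new identifiers.

// CUDA name (in the .cu below)            -> HIP name (in the .hip above)
// cudaError_t / cudaSuccess               -> hipError_t / hipSuccess
// cudaGetErrorString                      -> hipGetErrorString
// cudaGetDevice / cudaDeviceGetAttribute  -> hipGetDevice / hipDeviceGetAttribute
// cudaDevAttrMultiProcessorCount          -> hipDeviceAttributeMultiprocessorCount
// cudaMalloc / cudaFree                   -> hipMalloc / hipFree
// cudaMallocHost / cudaFreeHost           -> hipHostMalloc / hipHostFree
// cudaStream_t / cudaStreamCreate/Destroy -> hipStream_t / hipStreamCreate/Destroy
// cudaEvent_t / cudaEventCreate/Record    -> hipEvent_t / hipEventCreate/Record
// cudaGetLastError / cudaDeviceSynchronize-> hipGetLastError / hipDeviceSynchronize
// cudaMemcpy / cudaMemcpyDeviceToHost     -> hipMemcpy / hipMemcpyDeviceToHost
// kernel<<<grid, block, shmem, stream>>>(args...)
//   -> hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), shmem, stream, args...)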
0044566f6613ce03e985a4086bc8b67a11483880.cu
#include <stdio.h>
#include <assert.h>

#define N 2048 * 2048 // Number of elements in each vector

__global__ void saxpy(int * a, int * b, int * c, int maxIndex)
{
  // Determine our unique global thread ID, so we know which element to process
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = tid; i < maxIndex; i += stride)
    c[i] = 2 * a[i] + b[i];
}

// init on gpu
__global__ void initWith(int value, int * a){
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = tid; i< N; i += stride)
    a[i] = value;
}

// check error
inline cudaError_t cudaCheck(cudaError_t result){
  if (result!=cudaSuccess){
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result==cudaSuccess);
  }
  return result;
}

int main()
{
  int *a, *b, *c, *h_c;
  int size = N * sizeof (int); // The total number of bytes per vector

  int deviceId;
  int numberOfSMs;
  cudaCheck(cudaGetDevice(&deviceId));
  cudaCheck(cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));

  // Allocate memory
  cudaCheck(cudaMalloc(&a, size)); // gpu only
  cudaCheck(cudaMalloc(&b, size)); // gpu only
  cudaCheck(cudaMalloc(&c, size)); // gpu only
  cudaCheck(cudaMallocHost(&h_c, size)); // pinned host memory

  int threads_per_block = 256;
  int number_of_blocks = numberOfSMs * 32;

  const int numberOfSegments = 15; // This example demonstrates slicing the work into 15 segments.
  int segmentN = N / numberOfSegments; // A value for a segment's worth of `N` is needed.

  // Initialize memory
  initWith <<<number_of_blocks, threads_per_block>>>(2, a);
  initWith <<<number_of_blocks, threads_per_block>>>(1, b);

  for (int i = 0; i < numberOfSegments; ++i){
    // Calculate the index where this particular segment should operate within the larger arrays.
    int offset = i * segmentN;

    // Create a stream for this segment's worth of copy and work.
    cudaStream_t stream;//, stream_cpy;
    cudaCheck(cudaStreamCreate(&stream));
    // cudaCheck(cudaStreamCreate(&stream_cpy));
    cudaEvent_t work;
    cudaCheck(cudaEventCreate(&work));

    saxpy <<<number_of_blocks, threads_per_block, 0, stream>>>(&a[offset], &b[offset], &c[offset], segmentN);
    cudaCheck(cudaEventRecord(work, stream));
    // `cudaStreamDestroy` will return immediately (is non-blocking), but will not actually destroy stream until
    // all stream operations are complete.
    cudaCheck(cudaStreamDestroy(stream));

    // cudaCheck(cudaStreamWaitEvent(stream_cpy, work, 0));
    // cudaCheck(cudaMemcpyAsync(&h_c[offset], &c[offset], size/segmentN, cudaMemcpyDeviceToHost, stream_cpy));
    // cudaCheck(cudaStreamDestroy(stream_cpy));
  }

  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize()); // Wait for the GPU to finish

  cudaCheck(cudaMemcpy(h_c, c, size, cudaMemcpyDeviceToHost));

  // Print out the first and last 5 values of c for a quality check
  for( int i = 0; i < 5; ++i )
    printf("h_c[%d] = %d, ", i, h_c[i]);
  printf ("\n");

  for( int i = N-5; i < N; ++i )
    printf("h_c[%d] = %d, ", i, h_c[i]);
  printf ("\n");

  // Free all our allocated memory
  cudaCheck(cudaFree(a));
  cudaCheck(cudaFree(b));
  cudaCheck(cudaFree(c));
  cudaCheck(cudaFreeHost(h_c));
}
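The per-segment loop above records an event on each stream, but the asynchronous copy that event was apparently meant to gate is left commented out (stream_cpy). The stand-alone sketch below shows that event-gated compute/copy overlap in isolation; the fill kernel, names, and sizes are illustrative assumptions, and the snippet is not a modification of the dataset file.

// Minimal sketch (not one of the dataset files): one work stream, one copy
// stream, and an event so the copy waits only on the kernel, not the device.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill(int *p, int v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = v;
}

int main() {
  const int n = 1 << 20;
  int *d = nullptr, *h = nullptr;
  cudaMalloc(&d, n * sizeof(int));
  cudaMallocHost(&h, n * sizeof(int));   // pinned memory is required for a truly async copy

  cudaStream_t s_work, s_copy;
  cudaEvent_t done;
  cudaStreamCreate(&s_work);
  cudaStreamCreate(&s_copy);
  cudaEventCreate(&done);

  fill<<<(n + 255) / 256, 256, 0, s_work>>>(d, 7, n);
  cudaEventRecord(done, s_work);         // mark completion of the work in s_work
  cudaStreamWaitEvent(s_copy, done, 0);  // s_copy blocks only until that event fires
  cudaMemcpyAsync(h, d, n * sizeof(int), cudaMemcpyDeviceToHost, s_copy);

  cudaStreamSynchronize(s_copy);
  printf("h[0]=%d h[n-1]=%d\n", h[0], h[n - 1]);

  cudaEventDestroy(done);
  cudaStreamDestroy(s_work);
  cudaStreamDestroy(s_copy);
  cudaFreeHost(h);
  cudaFree(d);
  return 0;
}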
94fcc103166a06f8f7786613e11fcf534d40e73a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/null_mask.hpp> #include <cudf/transform.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_list_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <thrust/sequence.h> #include <random> template <typename T> struct TypedColumnTest : public cudf::test::BaseFixture { cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; } TypedColumnTest() : data{_num_elements * cudf::size_of(type()), rmm::cuda_stream_default}, mask{cudf::bitmask_allocation_size_bytes(_num_elements), rmm::cuda_stream_default} { auto typed_data = static_cast<char*>(data.data()); auto typed_mask = static_cast<char*>(mask.data()); thrust::sequence(thrust::device, typed_data, typed_data + data.size()); thrust::sequence(thrust::device, typed_mask, typed_mask + mask.size()); } cudf::size_type num_elements() { return _num_elements; } std::random_device r; std::default_random_engine generator{r()}; std::uniform_int_distribution<cudf::size_type> distribution{200, 1000}; cudf::size_type _num_elements{distribution(generator)}; rmm::device_buffer data{}; rmm::device_buffer mask{}; rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)}; rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)}; }; TYPED_TEST_CASE(TypedColumnTest, cudf::test::Types<int32_t>); /** * @brief Verifies equality of the properties and data of a `column`'s views. 
* * @param col The `column` to verify */ void verify_column_views(cudf::column col) { cudf::column_view view = col; cudf::mutable_column_view mutable_view = col; EXPECT_EQ(col.type(), view.type()); EXPECT_EQ(col.type(), mutable_view.type()); EXPECT_EQ(col.size(), view.size()); EXPECT_EQ(col.size(), mutable_view.size()); EXPECT_EQ(col.null_count(), view.null_count()); EXPECT_EQ(col.null_count(), mutable_view.null_count()); EXPECT_EQ(col.nullable(), view.nullable()); EXPECT_EQ(col.nullable(), mutable_view.nullable()); EXPECT_EQ(col.num_children(), view.num_children()); EXPECT_EQ(col.num_children(), mutable_view.num_children()); EXPECT_EQ(view.head(), mutable_view.head()); EXPECT_EQ(view.data<char>(), mutable_view.data<char>()); EXPECT_EQ(view.offset(), mutable_view.offset()); } TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask), this->num_elements()}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; rmm::device_buffer empty_null_mask{}; EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; auto invalid_size_null_mask = create_null_mask(::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID); EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), 
std::move(this->all_valid_mask)}; EXPECT_NO_THROW(col.set_null_count(0)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_NO_THROW(col.set_null_count(this->num_elements())); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_EQ(this->num_elements(), col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(0, col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, CopyDataNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); } TYPED_TEST(TypedColumnTest, MoveDataNoMask) { void* original_data = this->data.data(); cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); } TYPED_TEST(TypedColumnTest, CopyDataAndMask) { cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); EXPECT_NE(v.null_mask(), this->all_valid_mask.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size()); } TYPED_TEST(TypedColumnTest, MoveDataAndMask) { void* original_data = this->data.data(); void* original_mask = this->all_valid_mask.data(); cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); EXPECT_EQ(v.null_mask(), original_mask); } TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy 
cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); } TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; auto original_data = original.view().head(); cudf::column moved_to{std::move(original)}; EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); verify_column_views(moved_to); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); } TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = original.view().head(); auto original_mask = original.view().null_mask(); cudf::column moved_to{std::move(original)}; verify_column_views(moved_to); EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); EXPECT_EQ(original_mask, moved_to_view.null_mask()); } TYPED_TEST(TypedColumnTest, ConstructWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; ; children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::INT8}, 42, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::FLOAT64}, 314, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; verify_column_views(col); EXPECT_EQ(2, col.num_children()); EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type()); EXPECT_EQ(42, col.child(0).size()); EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type()); EXPECT_EQ(314, col.child(1).size()); } TYPED_TEST(TypedColumnTest, ReleaseNoChildren) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(0u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ReleaseWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), 
rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(2u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column_view original_view = original; cudf::column copy{original_view}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } template <typename T> struct ListsColumnTest : public cudf::test::BaseFixture { }; using NumericTypesNotBool = cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>; TYPED_TEST_CASE(ListsColumnTest, NumericTypesNotBool); TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; auto result = std::make_unique<cudf::column>(list); cudf::test::expect_columns_equal(list, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; // Column of List<int> LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; // Column of 1 row, an empty List<int> LCW expect{LCW{}}; auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, // < ----------- empty List<List<int>>, slice this {LCW{3}, LCW{4, 5}}}; // Make 1-row column of type List<List<int>>, the row data 
contains 0 element. // Well-formed memory layout: // type: List<List<int>> // Length: 1 // Mask: 1 // Offsets: 0, 0 // List<int> // Length: 0 // Offset: // INT // Length: 0 auto leaf = std::make_unique<cudf::column>(cudf::column(LCW{})); auto offset = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0})); auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED); auto expect = cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask)); auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); auto expect_valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? false : true; }); using LCW = cudf::test::lists_column_wrapper<TypeParam>; cudf::test::lists_column_wrapper<TypeParam> list{ {{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, valids}; cudf::test::lists_column_wrapper<TypeParam> expect{ {LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids}; auto sliced = cudf::slice(list, {1, 5}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); // TODO: null mask equality is being checked separately because // expect_columns_equal doesn't do the check for lists columns. // This is fixed in https://github.com/rapidsai/cudf/pull/5904, // so we should remove this check after that's merged: cudf::test::expect_columns_equal( cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(), cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view()); } CUDF_TEST_PROGRAM_MAIN()
94fcc103166a06f8f7786613e11fcf534d40e73a.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/null_mask.hpp> #include <cudf/transform.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_list_utilities.hpp> #include <cudf_test/type_lists.hpp> #include <thrust/sequence.h> #include <random> template <typename T> struct TypedColumnTest : public cudf::test::BaseFixture { cudf::data_type type() { return cudf::data_type{cudf::type_to_id<T>()}; } TypedColumnTest() : data{_num_elements * cudf::size_of(type()), rmm::cuda_stream_default}, mask{cudf::bitmask_allocation_size_bytes(_num_elements), rmm::cuda_stream_default} { auto typed_data = static_cast<char*>(data.data()); auto typed_mask = static_cast<char*>(mask.data()); thrust::sequence(thrust::device, typed_data, typed_data + data.size()); thrust::sequence(thrust::device, typed_mask, typed_mask + mask.size()); } cudf::size_type num_elements() { return _num_elements; } std::random_device r; std::default_random_engine generator{r()}; std::uniform_int_distribution<cudf::size_type> distribution{200, 1000}; cudf::size_type _num_elements{distribution(generator)}; rmm::device_buffer data{}; rmm::device_buffer mask{}; rmm::device_buffer all_valid_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)}; rmm::device_buffer all_null_mask{create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)}; }; TYPED_TEST_CASE(TypedColumnTest, cudf::test::Types<int32_t>); /** * @brief Verifies equality of the properties and data of a `column`'s views. 
* * @param col The `column` to verify */ void verify_column_views(cudf::column col) { cudf::column_view view = col; cudf::mutable_column_view mutable_view = col; EXPECT_EQ(col.type(), view.type()); EXPECT_EQ(col.type(), mutable_view.type()); EXPECT_EQ(col.size(), view.size()); EXPECT_EQ(col.size(), mutable_view.size()); EXPECT_EQ(col.null_count(), view.null_count()); EXPECT_EQ(col.null_count(), mutable_view.null_count()); EXPECT_EQ(col.nullable(), view.nullable()); EXPECT_EQ(col.nullable(), mutable_view.nullable()); EXPECT_EQ(col.num_children(), view.num_children()); EXPECT_EQ(col.num_children(), mutable_view.num_children()); EXPECT_EQ(view.head(), mutable_view.head()); EXPECT_EQ(view.data<char>(), mutable_view.data<char>()); EXPECT_EQ(view.offset(), mutable_view.offset()); } TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_FALSE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask), 0}; EXPECT_TRUE(col.nullable()); EXPECT_FALSE(col.has_nulls()); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask), this->num_elements()}; EXPECT_TRUE(col.nullable()); EXPECT_TRUE(col.has_nulls()); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; rmm::device_buffer empty_null_mask{}; EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; auto invalid_size_null_mask = create_null_mask(std::min(this->num_elements() - 50, 0), cudf::mask_state::ALL_VALID); EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data), rmm::device_buffer{}}; EXPECT_THROW(col.set_null_count(1), cudf::logic_error); } TYPED_TEST(TypedColumnTest, SetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), 
std::move(this->all_valid_mask)}; EXPECT_NO_THROW(col.set_null_count(0)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, SetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_NO_THROW(col.set_null_count(this->num_elements())); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_null_mask)}; EXPECT_EQ(this->num_elements(), col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(this->num_elements(), col.null_count()); } TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(0, col.null_count()); EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); EXPECT_EQ(0, col.null_count()); } TYPED_TEST(TypedColumnTest, CopyDataNoMask) { cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); } TYPED_TEST(TypedColumnTest, MoveDataNoMask) { void* original_data = this->data.data(); cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; EXPECT_EQ(this->type(), col.type()); EXPECT_FALSE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); } TYPED_TEST(TypedColumnTest, CopyDataAndMask) { cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify deep copy cudf::column_view v = col; EXPECT_NE(v.head(), this->data.data()); EXPECT_NE(v.null_mask(), this->all_valid_mask.data()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.head(), this->data.data(), this->data.size()); CUDF_TEST_EXPECT_EQUAL_BUFFERS(v.null_mask(), this->all_valid_mask.data(), this->mask.size()); } TYPED_TEST(TypedColumnTest, MoveDataAndMask) { void* original_data = this->data.data(); void* original_mask = this->all_valid_mask.data(); cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; EXPECT_EQ(this->type(), col.type()); EXPECT_TRUE(col.nullable()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(this->num_elements(), col.size()); EXPECT_EQ(0, col.num_children()); verify_column_views(col); // Verify shallow copy cudf::column_view v = col; EXPECT_EQ(v.head(), original_data); EXPECT_EQ(v.null_mask(), original_mask); } TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy 
cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); } TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column copy{original}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view original_view = original; cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) { cudf::column original{this->type(), this->num_elements(), std::move(this->data)}; auto original_data = original.view().head(); cudf::column moved_to{std::move(original)}; EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); verify_column_views(moved_to); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); } TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = original.view().head(); auto original_mask = original.view().null_mask(); cudf::column moved_to{std::move(original)}; verify_column_views(moved_to); EXPECT_EQ(0, original.size()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); // Verify move cudf::column_view moved_to_view = moved_to; EXPECT_EQ(original_data, moved_to_view.head()); EXPECT_EQ(original_mask, moved_to_view.null_mask()); } TYPED_TEST(TypedColumnTest, ConstructWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; ; children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::INT8}, 42, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( cudf::data_type{cudf::type_id::FLOAT64}, 314, rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; verify_column_views(col); EXPECT_EQ(2, col.num_children()); EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type()); EXPECT_EQ(42, col.child(0).size()); EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type()); EXPECT_EQ(314, col.child(1).size()); } TYPED_TEST(TypedColumnTest, ReleaseNoChildren) { cudf::column col{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(0u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ReleaseWithChildren) { std::vector<std::unique_ptr<cudf::column>> children; children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), 
rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); children.emplace_back(std::make_unique<cudf::column>( this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default})); cudf::column col{this->type(), this->num_elements(), rmm::device_buffer{this->data, rmm::cuda_stream_default}, rmm::device_buffer{this->all_valid_mask, rmm::cuda_stream_default}, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; auto original_data = col.view().head(); auto original_mask = col.view().null_mask(); cudf::column::contents contents = col.release(); EXPECT_EQ(original_data, contents.data->data()); EXPECT_EQ(original_mask, contents.null_mask->data()); EXPECT_EQ(2u, contents.children.size()); EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); EXPECT_EQ(0, col.num_children()); } TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) { cudf::column original{ this->type(), this->num_elements(), std::move(this->data), std::move(this->all_valid_mask)}; cudf::column_view original_view = original; cudf::column copy{original_view}; verify_column_views(copy); CUDF_TEST_EXPECT_COLUMNS_EQUAL(original, copy); // Verify deep copy cudf::column_view copy_view = copy; EXPECT_NE(original_view.head(), copy_view.head()); EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); } template <typename T> struct ListsColumnTest : public cudf::test::BaseFixture { }; using NumericTypesNotBool = cudf::test::Concat<cudf::test::IntegralTypesNotBool, cudf::test::FloatingPointTypes>; TYPED_TEST_CASE(ListsColumnTest, NumericTypesNotBool); TYPED_TEST(ListsColumnTest, ListsColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; auto result = std::make_unique<cudf::column>(list); cudf::test::expect_columns_equal(list, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructor) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {3, 4}, {5, 6, 7}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{3, 4}, {5, 6, 7}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedIncludesEmpty) { cudf::test::lists_column_wrapper<TypeParam> list{{1, 2}, {}, {3, 4}, {8, 9}}; cudf::test::lists_column_wrapper<TypeParam> expect{{}, {3, 4}}; auto sliced = cudf::slice(list, {1, 3}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNonNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; // Column of List<int> LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; // Column of 1 row, an empty List<int> LCW expect{LCW{}}; auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedNestedEmpty) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, // < ----------- empty List<List<int>>, slice this {LCW{3}, LCW{4, 5}}}; // Make 1-row column of type List<List<int>>, the row data 
contains 0 element. // Well-formed memory layout: // type: List<List<int>> // Length: 1 // Mask: 1 // Offsets: 0, 0 // List<int> // Length: 0 // Offset: // INT // Length: 0 auto leaf = std::make_unique<cudf::column>(cudf::column(LCW{})); auto offset = std::make_unique<cudf::column>(cudf::column(FWCW_SZ{0, 0})); auto null_mask = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED); auto expect = cudf::make_lists_column(1, std::move(offset), std::move(leaf), 0, std::move(null_mask)); auto sliced = cudf::slice(list, {1, 2}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; // Column of List<List<int>>, with incomplete hierarchy LCW list{{LCW{1}, LCW{2}}, {}, {LCW{3}, LCW{4, 5}}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedZeroSliceLengthNonNested) { using LCW = cudf::test::lists_column_wrapper<TypeParam>; using FWCW_SZ = cudf::test::fixed_width_column_wrapper<cudf::size_type>; LCW list{{1, 2}, {}, {3, 4}, {8, 9}}; auto expect = cudf::empty_like(list); auto sliced = cudf::slice(list, {0, 0}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(*expect, result->view()); } TYPED_TEST(ListsColumnTest, ListsSlicedColumnViewConstructorWithNulls) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); auto expect_valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? false : true; }); using LCW = cudf::test::lists_column_wrapper<TypeParam>; cudf::test::lists_column_wrapper<TypeParam> list{ {{{{1, 2}, {3, 4}}, valids}, LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, valids}; cudf::test::lists_column_wrapper<TypeParam> expect{ {LCW{}, {{{5, 6, 7}, LCW{}, {8, 9}}, valids}, LCW{}, LCW{}}, expect_valids}; auto sliced = cudf::slice(list, {1, 5}).front(); auto result = std::make_unique<cudf::column>(sliced); cudf::test::expect_columns_equal(expect, result->view()); // TODO: null mask equality is being checked separately because // expect_columns_equal doesn't do the check for lists columns. // This is fixed in https://github.com/rapidsai/cudf/pull/5904, // so we should remove this check after that's merged: cudf::test::expect_columns_equal( cudf::mask_to_bools(result->view().null_mask(), 0, 4)->view(), cudf::mask_to_bools(static_cast<cudf::column_view>(expect).null_mask(), 0, 4)->view()); } CUDF_TEST_PROGRAM_MAIN()
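Note on this pair: the hipified test file differs from the CUDA original only in the generated-file banner and in hipify rewriting std::min(this->num_elements() - 50, 0) to ::min(...); the test is host-only libcudf code, so there are no kernel launches or runtime calls to translate. The unqualified ::min only resolves if something supplies a global integer min overload — the CUDA/HIP runtime headers pulled in indirectly here appear to do so for host code, while plain ISO C++ does not. The snippet below is a minimal standalone sketch of that distinction, not part of the dataset; the local min overload merely stands in for the one the GPU runtime headers would provide.

#include <algorithm>
#include <cstdio>

// Stand-in for the global min(int, int) that cuda_runtime.h / hip_runtime.h
// make visible to host code; assumed here so the sketch builds on its own.
inline int min(int a, int b) { return a < b ? a : b; }

int main() {
    int num_elements = 250;                    // the test fixture draws 200..1000
    int a = std::min(num_elements - 50, 0);    // spelling in the .cu original
    int b = ::min(num_elements - 50, 0);       // spelling left by hipify in the .hip file
    std::printf("%d %d\n", a, b);              // both print 0
    return 0;
}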
a55daadb5ac29797124efd529aaf85d1fc9333b1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "activations.h"
#include "../device_context/cuda_context.h"
#include <hipcub/hipcub.hpp>

namespace mlfe{
namespace math{

template <class DataType>
__global__ void relu_kernel(const int size, const DataType *x, DataType *y){
    CUDA_1D_KERNEL_LOOP(i, size){
        y[i] = x[i] > 0 ? x[i] : 0;
    }
}

template <class DataType>
__global__ void relu_gradient_kernel(const int size, const DataType *x, const DataType *dy, DataType *dx){
    CUDA_1D_KERNEL_LOOP(i, size){
        dx[i] = x[i] > 0 ? dy[i] : 0;
    }
}

template <>
void relu<float, CUDAContext>(const int size, const float *x, float *y){
    hipLaunchKernelGGL(( relu_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x, y);
}

template <>
void relu_gradient<float, CUDAContext>(const int size, const float *x, const float *dy, float *dx){
    hipLaunchKernelGGL(( relu_gradient_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x, dy, dx);
}

template <class DataType>
__global__ void sigmoid_kernel(const int size, const DataType *x, DataType *y){
    CUDA_1D_KERNEL_LOOP(i, size){
        y[i] = 1.f / (1.f + exp(-x[i]));
    }
}

template <class DataType>
__global__ void sigmoid_gradient_kernel(const int size, const DataType *y, const DataType *dy, DataType *dx){
    CUDA_1D_KERNEL_LOOP(i, size){
        dx[i] = dy[i] * y[i] * (1. - y[i]);
    }
}

template <>
void sigmoid<float, CUDAContext>(const int size, const float *x, float *y){
    hipLaunchKernelGGL(( sigmoid_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, x, y);
}

template <>
void sigmoid_gradient<float, CUDAContext>(const int size, const float *y, const float *dy, float *dx){
    hipLaunchKernelGGL(( sigmoid_gradient_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, y, dy, dx);
}

template <class DataType>
__global__ void xent_kernel(const int N, const int D, const DataType *Pdata, const DataType *labeldata, DataType* Ydata){
    typedef hipcub::BlockReduce<float, CUDA_CONTEXT_NUM_THREADS> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    for(int i = blockIdx.x; i < N; i += gridDim.x){
        DataType sum = DataType(0);
        for(int j = threadIdx.x; j < D; j += blockDim.x){
            int idx = i * D + j;
            sum += log(max(Pdata[idx], DataType(1e-20))) * labeldata[idx];
        }
        DataType tot = BlockReduce(temp_storage).Sum(sum);
        __syncthreads();
        if(threadIdx.x == 0){
            Ydata[i] = -tot;
        }
        __syncthreads();
    }
}

template <>
void cross_entropy<float, CUDAContext>(const int m, const int n, const float *prob_ptr, const float *label_ptr, float *loss_ptr){
    hipLaunchKernelGGL(( xent_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, loss_ptr);
}

template <>
void cross_entropy<double, CUDAContext>(const int m, const int n, const double *prob_ptr, const double *label_ptr, double *loss_ptr){
    hipLaunchKernelGGL(( xent_kernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, loss_ptr);
}

template <class DataType>
__global__ void xent_gradient_kernel(const int N, const int D, const DataType* Pdata, const DataType* labeldata, const DataType* lossdata, DataType* dXdata){
    CUDA_1D_KERNEL_LOOP(n, N * D){
        int idx = n / D;
        dXdata[n] = (Pdata[n] - labeldata[n]) * lossdata[idx];
    }
}

template <>
void cross_entropy_gradient<float, CUDAContext>(const int m, const int n, const float *prob_ptr, const float *label_ptr, const float *loss_ptr, float *dx_ptr){
    hipLaunchKernelGGL(( xent_gradient_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, loss_ptr, dx_ptr);
}

template <>
void cross_entropy_gradient<double, CUDAContext>(const int m, const int n, const double *prob_ptr, const double *label_ptr, const double *loss_ptr, double *dx_ptr){
    hipLaunchKernelGGL(( xent_gradient_kernel<double>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, prob_ptr, label_ptr, loss_ptr, dx_ptr);
}

template <class DataType>
__device__ float sigmoid_xent_forward_kernel(const DataType x, const DataType t){
    return x * t - max(x, DataType(0)) - log(DataType(1) + ::exp(-abs(x)));
}

template <class DataType>
__global__ void sigmoid_xent_kernel(const int m, const int n, const DataType *x_ptr, const DataType *t_ptr, DataType *loss_ptr){
    int i = blockIdx.x;
    int last_idx = (i + 1) * n;
    DataType value = 0;
    for(int in_idx = i * n + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x){
        value += sigmoid_xent_forward_kernel(x_ptr[in_idx], t_ptr[in_idx]);
    }
    typedef hipcub::BlockReduce<DataType, CUDA_CONTEXT_NUM_THREADS> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    DataType sum = BlockReduce(temp_storage).Sum(value);
    if(threadIdx.x == 0){
        loss_ptr[i] = -sum / static_cast<DataType>(n);
    }
}

template <>
void sigmoid_cross_entropy<float, CUDAContext>(const int m, const int n, const float *x_ptr, const float *t_ptr, float *loss_ptr){
    hipLaunchKernelGGL(( sigmoid_xent_kernel<float>), dim3(m),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, x_ptr, t_ptr, loss_ptr);
}

template <class DataType>
__global__ void sigmoid_xent_gradient_kernel(const int m, const int n, const DataType *x_ptr, const DataType *t_ptr, const DataType *dy_ptr, DataType *dx_ptr){
    CUDA_1D_KERNEL_LOOP(index, m * n){
        int t = index / n;
        DataType dy = dy_ptr[t] / DataType(n);
        DataType sig = DataType(1) / (DataType(1) + ::exp(-x_ptr[index]));
        dx_ptr[index] = (sig - t_ptr[index]) * dy;
    }
}

template <>
void sigmoid_cross_entropy_gradient<float, CUDAContext>(const int m, const int n, const float *x_ptr, const float *t_ptr, const float *dy_ptr, float *dx_ptr){
    hipLaunchKernelGGL(( sigmoid_xent_gradient_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(m * n)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, m, n, x_ptr, t_ptr, dy_ptr, dx_ptr);
}

template <class DataType>
__global__ void reduce_mean_gradient_kernel(const int size, const DataType scale, const DataType *dy, DataType *dx){
    DataType dy_val = dy[0];
    CUDA_1D_KERNEL_LOOP(index, size){
        dx[index] = dy_val * scale;
    }
}

template <>
void reduce_mean_gradient<float, CUDAContext>(const int size, const float scale, const float *dy_ptr, float *dx_ptr){
    hipLaunchKernelGGL(( reduce_mean_gradient_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(size)),
        dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, size, scale, dy_ptr, dx_ptr);
}

} // end namespace math
} // end namespace mlfe
a55daadb5ac29797124efd529aaf85d1fc9333b1.cu
#include "activations.h" #include "../device_context/cuda_context.h" #include <cub/block/block_reduce.cuh> namespace mlfe{ namespace math{ template <class DataType> __global__ void relu_kernel(const int size, const DataType *x, DataType *y ) { CUDA_1D_KERNEL_LOOP(i, size){ y[i] = x[i] > 0 ? x[i] : 0; } } template <class DataType> __global__ void relu_gradient_kernel(const int size, const DataType *x, const DataType *dy, DataType *dx ) { CUDA_1D_KERNEL_LOOP(i, size){ dx[i] = x[i] > 0 ? dy[i] : 0; } } template <> void relu<float, CUDAContext>(const int size, const float *x, float *y ) { relu_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size), CUDA_CONTEXT_NUM_THREADS >> >(size, x, y); } template <> void relu_gradient<float, CUDAContext>(const int size, const float *x, const float *dy, float *dx ) { relu_gradient_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size), CUDA_CONTEXT_NUM_THREADS>>>(size, x, dy, dx); } template <class DataType> __global__ void sigmoid_kernel(const int size, const DataType *x, DataType *y ) { CUDA_1D_KERNEL_LOOP(i, size){ y[i] = 1.f /(1.f + exp(-x[i])); } } template <class DataType> __global__ void sigmoid_gradient_kernel(const int size, const DataType *y, const DataType *dy, DataType *dx ) { CUDA_1D_KERNEL_LOOP(i, size){ dx[i] = dy[i] * y[i] *(1. - y[i]); } } template <> void sigmoid<float, CUDAContext>(const int size, const float *x, float *y ) { sigmoid_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size), CUDA_CONTEXT_NUM_THREADS>>>(size, x, y); } template <> void sigmoid_gradient<float, CUDAContext>(const int size, const float *y, const float *dy, float *dx ) { sigmoid_gradient_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size), CUDA_CONTEXT_NUM_THREADS>>>(size, y, dy, dx); } template <class DataType> __global__ void xent_kernel(const int N, const int D, const DataType *Pdata, const DataType *labeldata, DataType* Ydata ) { typedef cub::BlockReduce<float, CUDA_CONTEXT_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for(int i = blockIdx.x; i < N; i += gridDim.x){ DataType sum = DataType(0); for(int j = threadIdx.x; j < D; j += blockDim.x){ int idx = i * D + j; sum += log(max(Pdata[idx], DataType(1e-20))) * labeldata[idx]; } DataType tot = BlockReduce(temp_storage).Sum(sum); __syncthreads(); if(threadIdx.x == 0){ Ydata[i] = -tot; } __syncthreads(); } } template <> void cross_entropy<float, CUDAContext>(const int m, const int n, const float *prob_ptr, const float *label_ptr, float *loss_ptr ) { xent_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(m * n), CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, loss_ptr); } template <> void cross_entropy<double, CUDAContext>(const int m, const int n, const double *prob_ptr, const double *label_ptr, double *loss_ptr ) { xent_kernel<double><<<CUDA_CONTEXT_GET_BLOCKS(m * n), CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, loss_ptr); } template <class DataType> __global__ void xent_gradient_kernel(const int N, const int D, const DataType* Pdata, const DataType* labeldata, const DataType* lossdata, DataType* dXdata ) { CUDA_1D_KERNEL_LOOP(n, N * D){ int idx = n / D; dXdata[n] = (Pdata[n] - labeldata[n]) * lossdata[idx]; } } template <> void cross_entropy_gradient<float, CUDAContext>(const int m, const int n, const float *prob_ptr, const float *label_ptr, const float *loss_ptr, float *dx_ptr ) { xent_gradient_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(m * n), CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, loss_ptr, dx_ptr); } template <> void cross_entropy_gradient<double, CUDAContext>(const int m, const 
int n, const double *prob_ptr, const double *label_ptr, const double *loss_ptr, double *dx_ptr ) { xent_gradient_kernel<double><<<CUDA_CONTEXT_GET_BLOCKS(m * n), CUDA_CONTEXT_NUM_THREADS>>>(m, n, prob_ptr, label_ptr, loss_ptr, dx_ptr); } template <class DataType> __device__ float sigmoid_xent_forward_kernel(const DataType x, const DataType t ) { return x * t - max(x, DataType(0)) - log(DataType(1) + std::exp(-abs(x))); } template <class DataType> __global__ void sigmoid_xent_kernel(const int m, const int n, const DataType *x_ptr, const DataType *t_ptr, DataType *loss_ptr ) { int i = blockIdx.x; int last_idx =(i + 1) * n; DataType value = 0; for(int in_idx = i * n + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x){ value += sigmoid_xent_forward_kernel(x_ptr[in_idx], t_ptr[in_idx]); } typedef cub::BlockReduce<DataType, CUDA_CONTEXT_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; DataType sum = BlockReduce(temp_storage).Sum(value); if(threadIdx.x == 0){ loss_ptr[i] = -sum / static_cast<DataType>(n); } } template <> void sigmoid_cross_entropy<float, CUDAContext>(const int m, const int n, const float *x_ptr, const float *t_ptr, float *loss_ptr ) { sigmoid_xent_kernel<float><<<m, CUDA_CONTEXT_NUM_THREADS>>>(m, n, x_ptr, t_ptr, loss_ptr); } template <class DataType> __global__ void sigmoid_xent_gradient_kernel(const int m, const int n, const DataType *x_ptr, const DataType *t_ptr, const DataType *dy_ptr, DataType *dx_ptr ) { CUDA_1D_KERNEL_LOOP(index, m * n){ int t = index / n; DataType dy = dy_ptr[t] / DataType(n); DataType sig = DataType(1) /(DataType(1) + std::exp(-x_ptr[index])); dx_ptr[index] =(sig - t_ptr[index]) * dy; } } template <> void sigmoid_cross_entropy_gradient<float, CUDAContext>(const int m, const int n, const float *x_ptr, const float *t_ptr, const float *dy_ptr, float *dx_ptr ) { sigmoid_xent_gradient_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(m * n), CUDA_CONTEXT_NUM_THREADS>>>(m, n, x_ptr, t_ptr, dy_ptr, dx_ptr); } template <class DataType> __global__ void reduce_mean_gradient_kernel(const int size, const DataType scale, const DataType *dy, DataType *dx ) { DataType dy_val = dy[0]; CUDA_1D_KERNEL_LOOP(index, size){ dx[index] = dy_val * scale; } } template <> void reduce_mean_gradient<float, CUDAContext>(const int size, const float scale, const float *dy_ptr, float *dx_ptr ) { reduce_mean_gradient_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(size), CUDA_CONTEXT_NUM_THREADS>>>(size, scale, dy_ptr, dx_ptr); } } // end namespace math } // end namespace mlfe
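The activations pair above shows the two mechanical translations hipify applies throughout this dataset: cub::BlockReduce with <cub/block/block_reduce.cuh> becomes hipcub::BlockReduce with <hipcub/hipcub.hpp>, and every triple-chevron launch becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMemBytes, stream, args...), with 0 for the shared-memory and stream slots when the CUDA code left them implicit. The standalone sketch below — saxpy, n, x, y are illustrative names, not taken from the files above — shows the same mapping on a minimal kernel; it is written as CUDA, with the hipified spelling shown in a comment.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void saxpy(int n, float a, const float* x, float* y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main() {
    const int n = 1 << 10;
    float *x = nullptr, *y = nullptr;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.f; y[i] = 2.f; }

    const int threads = 256;
    const int blocks  = (n + threads - 1) / threads;

    // CUDA spelling, as on the .cu side of each pair:
    saxpy<<<blocks, threads>>>(n, 3.f, x, y);
    // Equivalent HIP spelling that hipify emits on the .hip side
    // (explicit shared-memory bytes = 0, stream = 0):
    //   hipLaunchKernelGGL(saxpy, dim3(blocks), dim3(threads), 0, 0, n, 3.f, x, y);

    cudaDeviceSynchronize();
    std::printf("y[0] = %f\n", y[0]);  // expect 5.000000
    cudaFree(x);
    cudaFree(y);
    return 0;
}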
de9842622079c7cbb367af53ef56bfbc8123abf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "header.h" using namespace std; static int TimberON; static void Host(VolumeParameter *VolPrm, char *picFileName, char *volFileName); int main(int argc, char* argv[]){ VolumeParameter PrmMaster; PictureParameter PicParam; PrmMaster.picParam = &PicParam; if (argc < 7){ //GPU fprintf(stderr, "USAGE: reconstruction.exe [1]ParameterFile [2]InputFile [3]OutputFile [4]TimerSetting [5]Release [6]ThreadX [7]ThreadY\n"); exit(1); } else{ TimberON = atoi(argv[4]); } /*-------Display CUDA device properties----------------------*/ hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0)); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", 0, deviceProp.name, deviceProp.major, deviceProp.minor); //unsigned int CPU_THREAD_NUM = omp_get_num_procs(); //printf("Number of CPU threads = %d\n", CPU_THREAD_NUM); /*-----------------------*/ get_param(&PrmMaster, argv[1]); // /*---------------main process ---------------------------------*/ Host(&PrmMaster, argv[2], argv[3]); /*------------ Output to Disk ---------------------------------*/ //if (atoi(argv[5]) == 1){ // OutputToDisk(&PrmMaster, argv[3]); //} free_host_mem(&PrmMaster); hipDeviceReset(); if (TimberON){ printf("finished!\n"); //printf("The end."); char str1[1]; scanf("%s", &str1); } return 0; } static void Host(VolumeParameter *VolPrm, char *picFileName, char *volFileName){ /*----------------------------------------------------------------------------------------*/ hipEvent_t start0, stop0, start1, stop1, start2, stop2, start3, stop3, start4, stop4, start5, stop5, start6, stop6, start7, stop7, start8, stop8, start9, stop9, start10, stop10, start11, stop11; hipEventCreate(&start0); hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&start3); hipEventCreate(&start4); hipEventCreate(&start5); hipEventCreate(&start6); hipEventCreate(&start7); hipEventCreate(&start8); hipEventCreate(&start9); hipEventCreate(&start10); hipEventCreate(&start11); hipEventCreate(&stop0); hipEventCreate(&stop1); hipEventCreate(&stop2); hipEventCreate(&stop3); hipEventCreate(&stop4); hipEventCreate(&stop5); hipEventCreate(&stop6); hipEventCreate(&stop7); hipEventCreate(&stop8); hipEventCreate(&stop9); hipEventCreate(&stop10); hipEventCreate(&stop11); float timer0 = 0.0F, timer1 = 0.0F, timer2 = 0.0F, timer3 = 0.0F, timer4 = 0.0F, timer5 = 0.0F, timer6 = 0.0F, timer7 = 0.0F, timer8 = 0.0F, timer9 = 0.0F, timer10 = 0.0F, timer11 = 0.0F; if (TimberON) { hipEventRecord(start0, 0); } int EndPicture = VolPrm->picParam->np; // /*---------------- Grid size, thread size setup --------------------------*/ dim3 grid_fft(2, VolPrm->picParam->nv); dim3 threads_fft(VolPrm->picParam->nu / 2, 1); dim3 grid_rcs(VolPrm->picParam->nx / RECON_THREADx, VolPrm->picParam->ny / RECON_THREADy); dim3 threads_rcs(RECON_THREADx, RECON_THREADy); //printf("Reconstruction thread:\t\t\t(X,Y,Z) = (%d,%d,%d)\n", RECON_THREADx, RECON_THREADy, THREAD_SIZE_Z); //printf("Reconstruction block:\t\t\t(X,Y,Z) = (%d,%d,%d)\n", VolPrm->picParam->nx / RECON_THREADx, VolPrm->picParam->ny / RECON_THREADy, BLOCK_SIZE_Z); /*------------------------------------------------*/ hipStream_t *streams = NULL; // if ((streams = ((hipStream_t *)malloc(STREAM_NUM * sizeof(hipStream_t)))) == NULL){ printf("Stream malloc error\n"); exit(1); } for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(hipStreamCreate(&(streams[i]))); } /* Allocate Host Memeory */ /* Allocate Device Memeory */ 
//if (CONSOLE_DISPLAY) { printf("Allocate device memory for raw picture.\n"); } float *dev_raw_pic_buf[PIC_SET_NUM]; float *dev_filtered_pic_buf[PIC_SET_NUM]; for (int i = 0; i < PIC_SET_NUM; i++){ checkCudaErrors(hipMalloc((void**)&dev_raw_pic_buf[i], VolPrm->pic_data_size)); checkCudaErrors(hipMalloc((void**)&dev_filtered_pic_buf[i], VolPrm->pic_data_size)); checkCudaErrors(hipMemset(dev_raw_pic_buf[i], 0, VolPrm->pic_data_size)); checkCudaErrors(hipMemset(dev_filtered_pic_buf[i], 0, VolPrm->pic_data_size)); } float * dev_merge_pic; checkCudaErrors(hipMalloc((void**)&dev_merge_pic, VolPrm->pic_data_size * PIC_SET_NUM)); /*------------------- texture memory ---------------------------------------*/ hipChannelFormatDesc desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipExtent extent; extent.width = VolPrm->picParam->nu; extent.height = VolPrm->picParam->nv; extent.depth = PIC_SET_NUM; hipArray_t tex_buf; checkCudaErrors(hipMalloc3DArray(&tex_buf, &desc, extent, hipArrayLayered)); hipResourceDesc resdesc; resdesc.resType = hipResourceTypeArray; resdesc.res.array.array = tex_buf; hipTextureDesc texdesc; memset(&texdesc, 0, sizeof(hipTextureDesc)); texdesc.normalizedCoords = 0; //indicates whether texture reads are normalized or not texdesc.filterMode = hipFilterModeLinear; texdesc.addressMode[0] = hipAddressModeClamp; texdesc.addressMode[1] = hipAddressModeClamp; texdesc.addressMode[2] = hipAddressModeClamp; texdesc.readMode = hipReadModeElementType; hipTextureObject_t texObjt; checkCudaErrors(hipCreateTextureObject(&texObjt, &resdesc, &texdesc, NULL)); /*---------------- parameter for copy data to texture memory --------------------- */ hipMemcpy3DParms memcpy3dparam = { 0 }; memcpy3dparam.srcPos = make_hipPos(0, 0, 0); memcpy3dparam.dstPos = make_hipPos(0, 0, 0); memcpy3dparam.srcPtr = make_hipPitchedPtr(dev_merge_pic, VolPrm->picParam->nu * sizeof(float), VolPrm->picParam->nu, VolPrm->picParam->nv); memcpy3dparam.dstArray = tex_buf; memcpy3dparam.extent = make_hipExtent(VolPrm->picParam->nu, VolPrm->picParam->nv, PIC_SET_NUM); memcpy3dparam.kind = hipMemcpyDeviceToDevice; /*-------------- device memeory allocation for sub volume --------------*/ float* dev_vol_buf; hipMalloc((void**)&dev_vol_buf, VolPrm->subVol_data_size); hipMemset(dev_vol_buf, 0, VolPrm->subVol_data_size); if (CONSOLE_DISPLAY) { printf("memory allocation finished.\n"); } /* Allocate Device Memeory */ FILE *picFile; if ((picFile = fopen(picFileName, "rb")) == NULL){ printf("%s is not exist\n", picFileName); exit(1); } /*The following is for angle - time test*/ ofstream angletime; angletime.open("angletime.csv"); float timeArray[1200/PIC_SET_NUM] = {0.0f};// 1200/20=60 /* File Open finished */ float ftemp; if (TimberON) { hipEventRecord(start2, 0); } for (int picture_ID = 0; picture_ID < EndPicture; picture_ID += PIC_SET_NUM){ /*---------------------Copy raw picture from host to device -----------------------------*/ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(hipHostMalloc((void**)&VolPrm->picture.picArray[picture_ID + i], VolPrm->pic_data_size, hipHostMallocDefault)); fread(VolPrm->picture.picArray[picture_ID + i], sizeof(float), VolPrm->pixel_num, picFile); checkCudaErrors(hipMemcpyAsync(dev_raw_pic_buf[i], VolPrm->picture.picArray[picture_ID + i], VolPrm->pic_data_size, hipMemcpyHostToDevice, streams[i])); } /* Filter CUDA kernel */ if (TimberON) { hipEventRecord(start3, 0); } printf("f"); /* for (int i = 0; i < STREAM_NUM; i++){ convolution <<< grid_fft, threads_fft, 2 * 
VolPrm->picParam->nu * sizeof(float), streams[i] >>> (dev_raw_pic_buf[i], dev_filtered_pic_buf[i], *(VolPrm->picParam)); getLastCudaError("filter kernel failed\n"); } */ //hipDeviceSynchronize(); if (TimberON) { hipEventRecord(stop3, 0); hipEventSynchronize(stop3); } hipEventElapsedTime(&ftemp, start3, stop3); timer3 += ftemp; /* Filter CUDA kernel */ /*----------------------Copy filtered picture from device to host.(OVERWRITE)---------------------*/ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(hipMemcpyAsync(VolPrm->picture.picArray[picture_ID + i], dev_raw_pic_buf[i], VolPrm->pic_data_size, hipMemcpyDeviceToHost, streams[i])); } } if (TimberON) { hipEventRecord(stop2, 0); hipEventSynchronize(stop2); } fclose(picFile); if (TimberON) { hipEventRecord(start7, 0); } /*------------------ Reconstruct the Slice sequentially----------------------------*/ // for (int Sub_Vol_ID = StartVolume; Sub_Vol_ID < VolPrm->picParam->nz / Z_PIXEL; Sub_Vol_ID++){ for (int Sub_Vol_ID = 0; Sub_Vol_ID < 1; Sub_Vol_ID++){ /*---------------------Copy filtered first PIC_SET_NUM pictures from host to device texture memory----------*/ for (int picture_ID = 0; picture_ID < EndPicture; picture_ID += PIC_SET_NUM){ // for (int picture_ID = 0; picture_ID < PIC_SET_NUM; picture_ID += PIC_SET_NUM){ if (picture_ID == 0){ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(hipMemcpyAsync(&dev_merge_pic[i * VolPrm->pixel_num], VolPrm->picture.picArray[picture_ID + i], VolPrm->pic_data_size, hipMemcpyHostToDevice, streams[i])); } checkCudaErrors(hipMemcpy3D(&memcpy3dparam));//Can't be async here! } /*---------------------Copy NEXT loop filtered picture from host to device global memory(temporary buffer)---*/ if ((picture_ID + PIC_SET_NUM) < EndPicture){ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(hipMemcpyAsync(&dev_merge_pic[i * VolPrm->pixel_num], VolPrm->picture.picArray[picture_ID + i + PIC_SET_NUM], //data for NEXT loop VolPrm->pic_data_size, hipMemcpyHostToDevice, streams[i])); } } /*----------------------- reconstruction kernel ---------------------------------------------- */ if (CONSOLE_DISPLAY) { printf("r"); } hipProfilerStart(); if (TimberON) { hipEventRecord(start6, 0); } hipLaunchKernelGGL(( reconstruction) , dim3(grid_rcs), dim3(threads_rcs) , 0, 0, dev_vol_buf, picture_ID, Sub_Vol_ID, texObjt, *(VolPrm->picParam)); getLastCudaError("reconstruction CUDA kernel failed\n"); hipDeviceSynchronize(); /*Block CPU until ALL GPU finish job*/ if (TimberON) { hipEventRecord(stop6, 0); hipEventSynchronize(stop6); } hipEventElapsedTime(&ftemp, start6, stop6); hipProfilerStop(); timer6 += ftemp; /*for angle test*/timeArray[picture_ID / PIC_SET_NUM]+= ftemp; checkCudaErrors(hipMemcpy3DAsync(&memcpy3dparam)); } /*-----------------------copy volume result from device to host-----------------------------------------*/ checkCudaErrors(hipMemcpy(VolPrm->subVolume, dev_vol_buf, VolPrm->subVol_data_size, hipMemcpyDeviceToHost)); /*--------------clear the volume buffer from next sub volume ----------------------------*/ checkCudaErrors(hipMemsetAsync(dev_vol_buf, 0, VolPrm->subVol_data_size)); {OutputToDisk(VolPrm, Sub_Vol_ID, volFileName);} } //for (int Sub_Vol_ID = StartVolume; Sub_Vol_ID < VolPrm->picParam->nz / Z_PIXEL; Sub_Vol_ID++) if (TimberON) { hipEventRecord(stop7, 0); hipEventSynchronize(stop7); } printf("\nReconstruction finished!\n"); /* Finish restruction */ /* FILE *filtered; if ((filtered = fopen("filteredPic", "wb")) == NULL){ printf("filteredPic can not open\n"); exit(1); } printf("Copying 
filtered picture to disk"); for (int i = 0; i < VolPrm->picParam->np; i++){ if (i % 50 == 0){ printf("."); } fwrite(VolPrm->picture.picArray[i], sizeof(float), VolPrm->pixel_num, filtered); } printf("done\n"); */ /*The following is for test angle*/ for(int i=0; i<(1200 / PIC_SET_NUM); i++){ angletime << timeArray[i] <<endl; } angletime << timer6; angletime.close(); if (TimberON) { hipEventRecord(stop0, 0); hipEventSynchronize(stop0); } /*-------------- Free Memeory and Destory object --------------------------------------*/ checkCudaErrors(hipFree(dev_vol_buf)); checkCudaErrors(hipFree(dev_merge_pic)); checkCudaErrors(hipFreeArray(tex_buf)); checkCudaErrors(hipDestroyTextureObject(texObjt)); for (int i = 0; i < PIC_SET_NUM; i++){ checkCudaErrors(hipFree(dev_raw_pic_buf[i])); checkCudaErrors(hipFree(dev_filtered_pic_buf[i])); checkCudaErrors(hipStreamDestroy(streams[i])); } /*-----------------------------------------------------------------*/ if (TimberON){ hipEventElapsedTime(&timer1, start1, stop1); hipEventElapsedTime(&timer2, start2, stop2); hipEventElapsedTime(&timer4, start4, stop4); hipEventElapsedTime(&timer5, start5, stop5); hipEventElapsedTime(&timer7, start7, stop7); hipEventElapsedTime(&timer8, start8, stop8); hipEventElapsedTime(&timer9, start9, stop9); hipEventElapsedTime(&timer10, start10, stop10); hipEventElapsedTime(&timer11, start11, stop11); //printf("Parameter translation time : %.2f\n", timer1); //printf("VRAM allocation time : %.2f\n", timer9); //printf("GPU synchronization time : %.2f\n", timer5); //printf("Volume initialization time : %.2f\n", timer8); //printf("Projection download stream time : %.4f\n", timer2); //printf("Projection download non stream time : %.4f\n", timer10); //printf("Projection copy time : %.2f\n", timer11); //printf("Projection writeback stream time : %.2f\n", timer4); //printf("Volume readback time : %.2f\n", timer7); printf("Copy and filter time : %.0f[ms]\n", timer2); printf("filter only time : %.0f[ms]\n", timer3); printf("Reconstruction time : %.0f[ms]\n", timer6); } printf("Total time of reconstruction : %.0f[ms]\n", timer7); hipEventElapsedTime(&timer0, start0, stop0); printf("Total time : %.0f[ms]\n", timer0); hipEventDestroy(start0); hipEventDestroy(stop0); hipEventDestroy(start1); hipEventDestroy(stop1); hipEventDestroy(start2); hipEventDestroy(stop2); hipEventDestroy(start3); hipEventDestroy(stop3); hipEventDestroy(start4); hipEventDestroy(stop4); hipEventDestroy(start5); hipEventDestroy(stop5); hipEventDestroy(start6); hipEventDestroy(stop6); hipEventDestroy(start7); hipEventDestroy(stop7); hipEventDestroy(start8); hipEventDestroy(stop8); hipEventDestroy(start9); hipEventDestroy(stop9); hipEventDestroy(start10); hipEventDestroy(stop10); hipEventDestroy(start11); hipEventDestroy(stop11); }
de9842622079c7cbb367af53ef56bfbc8123abf9.cu
#include "header.h" using namespace std; static int TimberON; static void Host(VolumeParameter *VolPrm, char *picFileName, char *volFileName); int main(int argc, char* argv[]){ VolumeParameter PrmMaster; PictureParameter PicParam; PrmMaster.picParam = &PicParam; if (argc < 7){ //実行ファイル名,パラメータファイル,入力ファイル,出力ファイル,最大GPU数を指定する必要がある. fprintf(stderr, "USAGE: reconstruction.exe [1]ParameterFile [2]InputFile [3]OutputFile [4]TimerSetting [5]Release [6]ThreadX [7]ThreadY\n"); exit(1); } else{ TimberON = atoi(argv[4]); } /*-------Display CUDA device properties----------------------*/ cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0)); printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", 0, deviceProp.name, deviceProp.major, deviceProp.minor); //unsigned int CPU_THREAD_NUM = omp_get_num_procs(); //printf("Number of CPU threads = %d\n", CPU_THREAD_NUM); /*--------再構成パラメータの取得と必要な領域の確保---------------*/ get_param(&PrmMaster, argv[1]); //再構成パラメータを取得 /*---------------main process ---------------------------------*/ Host(&PrmMaster, argv[2], argv[3]); /*------------ Output to Disk ---------------------------------*/ //if (atoi(argv[5]) == 1){ // OutputToDisk(&PrmMaster, argv[3]); //} free_host_mem(&PrmMaster); cudaDeviceReset(); if (TimberON){ printf("finished!\n"); //printf("The end."); char str1[1]; scanf("%s", &str1); } return 0; } static void Host(VolumeParameter *VolPrm, char *picFileName, char *volFileName){ /*--------------------------------------------タイマー設定--------------------------------------------*/ cudaEvent_t start0, stop0, start1, stop1, start2, stop2, start3, stop3, start4, stop4, start5, stop5, start6, stop6, start7, stop7, start8, stop8, start9, stop9, start10, stop10, start11, stop11; cudaEventCreate(&start0); cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&start3); cudaEventCreate(&start4); cudaEventCreate(&start5); cudaEventCreate(&start6); cudaEventCreate(&start7); cudaEventCreate(&start8); cudaEventCreate(&start9); cudaEventCreate(&start10); cudaEventCreate(&start11); cudaEventCreate(&stop0); cudaEventCreate(&stop1); cudaEventCreate(&stop2); cudaEventCreate(&stop3); cudaEventCreate(&stop4); cudaEventCreate(&stop5); cudaEventCreate(&stop6); cudaEventCreate(&stop7); cudaEventCreate(&stop8); cudaEventCreate(&stop9); cudaEventCreate(&stop10); cudaEventCreate(&stop11); float timer0 = 0.0F, timer1 = 0.0F, timer2 = 0.0F, timer3 = 0.0F, timer4 = 0.0F, timer5 = 0.0F, timer6 = 0.0F, timer7 = 0.0F, timer8 = 0.0F, timer9 = 0.0F, timer10 = 0.0F, timer11 = 0.0F; if (TimberON) { cudaEventRecord(start0, 0); } int EndPicture = VolPrm->picParam->np; //投影像の枚 /*---------------- Grid size, thread size setup --------------------------*/ dim3 grid_fft(2, VolPrm->picParam->nv); dim3 threads_fft(VolPrm->picParam->nu / 2, 1); dim3 grid_rcs(VolPrm->picParam->nx / RECON_THREADx, VolPrm->picParam->ny / RECON_THREADy); dim3 threads_rcs(RECON_THREADx, RECON_THREADy); //printf("Reconstruction thread:\t\t\t(X,Y,Z) = (%d,%d,%d)\n", RECON_THREADx, RECON_THREADy, THREAD_SIZE_Z); //printf("Reconstruction block:\t\t\t(X,Y,Z) = (%d,%d,%d)\n", VolPrm->picParam->nx / RECON_THREADx, VolPrm->picParam->ny / RECON_THREADy, BLOCK_SIZE_Z); /*----------------ストリーム用の領域を確保--------------------------------*/ cudaStream_t *streams = NULL; //フィルタリングに使用するストリーム数の領域を確保 if ((streams = ((cudaStream_t *)malloc(STREAM_NUM * sizeof(cudaStream_t)))) == NULL){ printf("Stream malloc error\n"); exit(1); } for (int i = 0; i < STREAM_NUM; i++){ 
checkCudaErrors(cudaStreamCreate(&(streams[i]))); } /*↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ Allocate Host Memeory ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑*/ /*↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ Allocate Device Memeory ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓*/ //if (CONSOLE_DISPLAY) { printf("Allocate device memory for raw picture.\n"); } float *dev_raw_pic_buf[PIC_SET_NUM]; float *dev_filtered_pic_buf[PIC_SET_NUM]; for (int i = 0; i < PIC_SET_NUM; i++){ checkCudaErrors(cudaMalloc((void**)&dev_raw_pic_buf[i], VolPrm->pic_data_size)); checkCudaErrors(cudaMalloc((void**)&dev_filtered_pic_buf[i], VolPrm->pic_data_size)); checkCudaErrors(cudaMemset(dev_raw_pic_buf[i], 0, VolPrm->pic_data_size)); checkCudaErrors(cudaMemset(dev_filtered_pic_buf[i], 0, VolPrm->pic_data_size)); } float * dev_merge_pic; checkCudaErrors(cudaMalloc((void**)&dev_merge_pic, VolPrm->pic_data_size * PIC_SET_NUM)); /*------------------- texture memory ---------------------------------------*/ cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaExtent extent; extent.width = VolPrm->picParam->nu; extent.height = VolPrm->picParam->nv; extent.depth = PIC_SET_NUM; cudaArray_t tex_buf; checkCudaErrors(cudaMalloc3DArray(&tex_buf, &desc, extent, cudaArrayLayered)); cudaResourceDesc resdesc; resdesc.resType = cudaResourceTypeArray; resdesc.res.array.array = tex_buf; cudaTextureDesc texdesc; memset(&texdesc, 0, sizeof(cudaTextureDesc)); texdesc.normalizedCoords = 0; //indicates whether texture reads are normalized or not texdesc.filterMode = cudaFilterModeLinear; texdesc.addressMode[0] = cudaAddressModeClamp; texdesc.addressMode[1] = cudaAddressModeClamp; texdesc.addressMode[2] = cudaAddressModeClamp; texdesc.readMode = cudaReadModeElementType; cudaTextureObject_t texObjt; checkCudaErrors(cudaCreateTextureObject(&texObjt, &resdesc, &texdesc, NULL)); /*---------------- parameter for copy data to texture memory --------------------- */ cudaMemcpy3DParms memcpy3dparam = { 0 }; memcpy3dparam.srcPos = make_cudaPos(0, 0, 0); memcpy3dparam.dstPos = make_cudaPos(0, 0, 0); memcpy3dparam.srcPtr = make_cudaPitchedPtr(dev_merge_pic, VolPrm->picParam->nu * sizeof(float), VolPrm->picParam->nu, VolPrm->picParam->nv); memcpy3dparam.dstArray = tex_buf; memcpy3dparam.extent = make_cudaExtent(VolPrm->picParam->nu, VolPrm->picParam->nv, PIC_SET_NUM); memcpy3dparam.kind = cudaMemcpyDeviceToDevice; /*-------------- device memeory allocation for sub volume --------------*/ float* dev_vol_buf; cudaMalloc((void**)&dev_vol_buf, VolPrm->subVol_data_size); cudaMemset(dev_vol_buf, 0, VolPrm->subVol_data_size); if (CONSOLE_DISPLAY) { printf("memory allocation finished.\n"); } /*↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ Allocate Device Memeory ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑*/ FILE *picFile; if ((picFile = fopen(picFileName, "rb")) == NULL){ printf("%s is not exist\n", picFileName); exit(1); } /*The following is for angle - time test*/ ofstream angletime; angletime.open("angletime.csv"); float timeArray[1200/PIC_SET_NUM] = {0.0f};// 1200/20=60 /*↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ File Open finished ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑*/ float ftemp; if (TimberON) { cudaEventRecord(start2, 0); } for (int picture_ID = 0; picture_ID < EndPicture; picture_ID += PIC_SET_NUM){ /*---------------------Copy raw picture from host to device -----------------------------*/ for (int i = 0; i < 
STREAM_NUM; i++){ checkCudaErrors(cudaHostAlloc((void**)&VolPrm->picture.picArray[picture_ID + i], VolPrm->pic_data_size, cudaHostAllocDefault)); fread(VolPrm->picture.picArray[picture_ID + i], sizeof(float), VolPrm->pixel_num, picFile); checkCudaErrors(cudaMemcpyAsync(dev_raw_pic_buf[i], VolPrm->picture.picArray[picture_ID + i], VolPrm->pic_data_size, cudaMemcpyHostToDevice, streams[i])); } /*↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓ Filter CUDA kernel ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓*/ if (TimberON) { cudaEventRecord(start3, 0); } printf("f"); /* for (int i = 0; i < STREAM_NUM; i++){ convolution <<< grid_fft, threads_fft, 2 * VolPrm->picParam->nu * sizeof(float), streams[i] >>> (dev_raw_pic_buf[i], dev_filtered_pic_buf[i], *(VolPrm->picParam)); getLastCudaError("filter kernel failed\n"); } */ //cudaThreadSynchronize(); if (TimberON) { cudaEventRecord(stop3, 0); cudaEventSynchronize(stop3); } cudaEventElapsedTime(&ftemp, start3, stop3); timer3 += ftemp; /*↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ Filter CUDA kernel ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑*/ /*----------------------Copy filtered picture from device to host.(OVERWRITE)---------------------*/ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(cudaMemcpyAsync(VolPrm->picture.picArray[picture_ID + i], dev_raw_pic_buf[i], VolPrm->pic_data_size, cudaMemcpyDeviceToHost, streams[i])); } } if (TimberON) { cudaEventRecord(stop2, 0); cudaEventSynchronize(stop2); } fclose(picFile); if (TimberON) { cudaEventRecord(start7, 0); } /*------------------ Reconstruct the Slice sequentially----------------------------*/ // for (int Sub_Vol_ID = StartVolume; Sub_Vol_ID < VolPrm->picParam->nz / Z_PIXEL; Sub_Vol_ID++){ for (int Sub_Vol_ID = 0; Sub_Vol_ID < 1; Sub_Vol_ID++){ /*---------------------Copy filtered first PIC_SET_NUM pictures from host to device texture memory----------*/ for (int picture_ID = 0; picture_ID < EndPicture; picture_ID += PIC_SET_NUM){ // for (int picture_ID = 0; picture_ID < PIC_SET_NUM; picture_ID += PIC_SET_NUM){ if (picture_ID == 0){ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(cudaMemcpyAsync(&dev_merge_pic[i * VolPrm->pixel_num], VolPrm->picture.picArray[picture_ID + i], VolPrm->pic_data_size, cudaMemcpyHostToDevice, streams[i])); } checkCudaErrors(cudaMemcpy3D(&memcpy3dparam));//Can't be async here! 
} /*---------------------Copy NEXT loop filtered picture from host to device global memory(temporary buffer)---*/ if ((picture_ID + PIC_SET_NUM) < EndPicture){ for (int i = 0; i < STREAM_NUM; i++){ checkCudaErrors(cudaMemcpyAsync(&dev_merge_pic[i * VolPrm->pixel_num], VolPrm->picture.picArray[picture_ID + i + PIC_SET_NUM], //data for NEXT loop VolPrm->pic_data_size, cudaMemcpyHostToDevice, streams[i])); } } /*----------------------- reconstruction kernel ---------------------------------------------- */ if (CONSOLE_DISPLAY) { printf("r"); } cudaProfilerStart(); if (TimberON) { cudaEventRecord(start6, 0); } reconstruction <<< grid_rcs, threads_rcs >>> (dev_vol_buf, picture_ID, Sub_Vol_ID, texObjt, *(VolPrm->picParam)); getLastCudaError("reconstruction CUDA kernel failed\n"); cudaThreadSynchronize(); /*Block CPU until ALL GPU finish job*/ if (TimberON) { cudaEventRecord(stop6, 0); cudaEventSynchronize(stop6); } cudaEventElapsedTime(&ftemp, start6, stop6); cudaProfilerStop(); timer6 += ftemp; /*for angle test*/timeArray[picture_ID / PIC_SET_NUM]+= ftemp; checkCudaErrors(cudaMemcpy3DAsync(&memcpy3dparam)); } /*-----------------------copy volume result from device to host-----------------------------------------*/ checkCudaErrors(cudaMemcpy(VolPrm->subVolume, dev_vol_buf, VolPrm->subVol_data_size, cudaMemcpyDeviceToHost)); /*--------------clear the volume buffer from next sub volume ----------------------------*/ checkCudaErrors(cudaMemsetAsync(dev_vol_buf, 0, VolPrm->subVol_data_size)); {OutputToDisk(VolPrm, Sub_Vol_ID, volFileName);} } //for (int Sub_Vol_ID = StartVolume; Sub_Vol_ID < VolPrm->picParam->nz / Z_PIXEL; Sub_Vol_ID++) if (TimberON) { cudaEventRecord(stop7, 0); cudaEventSynchronize(stop7); } printf("\nReconstruction finished!\n"); /*↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ Finish restruction ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑*/ /* FILE *filtered; if ((filtered = fopen("filteredPic", "wb")) == NULL){ printf("filteredPic can not open\n"); exit(1); } printf("Copying filtered picture to disk"); for (int i = 0; i < VolPrm->picParam->np; i++){ if (i % 50 == 0){ printf("."); } fwrite(VolPrm->picture.picArray[i], sizeof(float), VolPrm->pixel_num, filtered); } printf("done\n"); */ /*The following is for test angle*/ for(int i=0; i<(1200 / PIC_SET_NUM); i++){ angletime << timeArray[i] <<endl; } angletime << timer6; angletime.close(); if (TimberON) { cudaEventRecord(stop0, 0); cudaEventSynchronize(stop0); } /*-------------- Free Memeory and Destory object --------------------------------------*/ checkCudaErrors(cudaFree(dev_vol_buf)); checkCudaErrors(cudaFree(dev_merge_pic)); checkCudaErrors(cudaFreeArray(tex_buf)); checkCudaErrors(cudaDestroyTextureObject(texObjt)); for (int i = 0; i < PIC_SET_NUM; i++){ checkCudaErrors(cudaFree(dev_raw_pic_buf[i])); checkCudaErrors(cudaFree(dev_filtered_pic_buf[i])); checkCudaErrors(cudaStreamDestroy(streams[i])); } /*-----------------------実行時間の表示------------------------------------------*/ if (TimberON){ cudaEventElapsedTime(&timer1, start1, stop1); cudaEventElapsedTime(&timer2, start2, stop2); cudaEventElapsedTime(&timer4, start4, stop4); cudaEventElapsedTime(&timer5, start5, stop5); cudaEventElapsedTime(&timer7, start7, stop7); cudaEventElapsedTime(&timer8, start8, stop8); cudaEventElapsedTime(&timer9, start9, stop9); cudaEventElapsedTime(&timer10, start10, stop10); cudaEventElapsedTime(&timer11, start11, stop11); //printf("Parameter translation time : %.2f\n", timer1); //printf("VRAM allocation time : %.2f\n", timer9); 
//printf("GPU synchronization time : %.2f\n", timer5); //printf("Volume initialization time : %.2f\n", timer8); //printf("Projection download stream time : %.4f\n", timer2); //printf("Projection download non stream time : %.4f\n", timer10); //printf("Projection copy time : %.2f\n", timer11); //printf("Projection writeback stream time : %.2f\n", timer4); //printf("Volume readback time : %.2f\n", timer7); printf("Copy and filter time : %.0f[ms]\n", timer2); printf("filter only time : %.0f[ms]\n", timer3); printf("Reconstruction time : %.0f[ms]\n", timer6); } printf("Total time of reconstruction : %.0f[ms]\n", timer7); cudaEventElapsedTime(&timer0, start0, stop0); printf("Total time : %.0f[ms]\n", timer0); cudaEventDestroy(start0); cudaEventDestroy(stop0); cudaEventDestroy(start1); cudaEventDestroy(stop1); cudaEventDestroy(start2); cudaEventDestroy(stop2); cudaEventDestroy(start3); cudaEventDestroy(stop3); cudaEventDestroy(start4); cudaEventDestroy(stop4); cudaEventDestroy(start5); cudaEventDestroy(stop5); cudaEventDestroy(start6); cudaEventDestroy(stop6); cudaEventDestroy(start7); cudaEventDestroy(stop7); cudaEventDestroy(start8); cudaEventDestroy(stop8); cudaEventDestroy(start9); cudaEventDestroy(stop9); cudaEventDestroy(start10); cudaEventDestroy(stop10); cudaEventDestroy(start11); cudaEventDestroy(stop11); }
e67a26a5a9fbcfde3083dcc8375a6d2dc89abc8d.hip
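// HIP translation of the Forma-generated Jacobi stencil below: kernel launches go through
// hipLaunchKernelGGL and the CUDA runtime calls are mapped to their hip* equivalents,
// while the stencil arithmetic itself is unchanged.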
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define mod(x,y) ( (x) & (y-1)) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X); float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X); int rowy = FORMA_BLOCKDIM_Y+32; //int threadIdx_y = mod((int)threadIdx.y,2); int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-4); for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) { int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ __tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__]; } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__-1),rowy)]); float __temp_5__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__+1),rowy)]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_19__; } } __syncthreads(); int __iter_10__ = 
FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__-1),rowy)]); float __temp_35__ = (__tilevar_1__[__iter_11__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__+1),rowy)]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __var_1__[__iter_11__+(M)*(__iter_10__)] = __temp_49__; } } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; hipMalloc(&__var_2__,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! 
: __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4); int __grid_1___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); } /*Host Free End*/
e67a26a5a9fbcfde3083dcc8375a6d2dc89abc8d.cu
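/* Forma-generated 2D Jacobi-style smoother: each kernel launch applies the 5-point
   weighted stencil (5*N + 12*W + 15*C + 12*E + 5*S) / 118 twice through two
   shared-memory tiles, and the host launches the kernel twice (input -> __var_2__
   -> __var_1__), so four stencil sweeps are applied before the result is copied back. */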
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define mod(x,y) ( (x) & (y-1)) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X); float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X); int rowy = FORMA_BLOCKDIM_Y+32; //int threadIdx_y = mod((int)threadIdx.y,2); int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-4); for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) { int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ __tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__]; } __syncthreads(); int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ; if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){ int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ; if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__-1),rowy)]); float __temp_5__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__+1),rowy)]); float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); __tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_19__; } } __syncthreads(); int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ; if( __iter_10__ <= 
FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){ int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ; if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__-1),rowy)]); float __temp_35__ = (__tilevar_1__[__iter_11__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__+1),rowy)]); float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); __var_1__[__iter_11__+(M)*(__iter_10__)] = __temp_49__; } } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+32)*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __var_2__; cudaMalloc(&__var_2__,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! 
: __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4); int __grid_1___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); } /*Host Free End*/
968ed45d4b745a30d6c1067b7d24fdd17a0bef8a.hip
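/*
 * HIP port of the MDL SDK texture runtime (examples/mdl_sdk/shared/texture_support_cuda.h):
 * defines the Texture, Mbsdf, Lightprofile and Texture_handler resource structs plus the
 * tex_*, df_light_profile_* and df_bsdf_measurement_* callbacks that code generated by the
 * MDL SDK uses for texture lookups, light profiles and measured BSDFs.
 */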
// !!! This is a file automatically generated by hipify!!! #include "../../Shared/KernelShared.hpp" #include "DataDesc.hpp" #pragma warning(push, 0) #include <mi/mdl_sdk.h> #include <vector_functions.hpp> #pragma warning(pop) namespace MDL = mi::neuraylib; // From examples/mdl_sdk/shared/texture_support_cuda.h #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #define M_ONE_OVER_PI 0.318309886183790671538 using MDL::Mbsdf_part; using MDL::Tex_wrap_mode; using MDL::Texture_handler_base; // Custom structure representing an MDL texture, containing filtered and // unfiltered CUDA texture objects and the size of the texture. struct Texture final { hipTextureObject_t filtered_object; // uses filter mode hipFilterModeLinear hipTextureObject_t unfiltered_object; // uses filter mode hipFilterModePoint uint3 size; // size of the texture, needed for texel access float3 inv_size; // the inverse values of the size of the texture }; // Custom structure representing an MDL BSDF measurement. struct Mbsdf { unsigned has_data[2]; // true if there is a measurement for this part hipTextureObject_t eval_data[2]; // uses filter mode hipFilterModeLinear float max_albedo[2]; // max albedo used to limit the multiplier float* sample_data[2]; // CDFs for sampling a BSDF measurement float* albedo_data[2]; // max albedo for each theta (isotropic) uint2 angular_resolution[2]; // size of the dataset, needed for texel access float2 inv_angular_resolution[2]; // the inverse values of the size of the // dataset unsigned num_channels[2]; // number of color channels (1 or 3) }; // Structure representing a Light Profile struct Lightprofile { __device__ explicit Lightprofile() : angular_resolution(make_uint2(0, 0)), theta_phi_start(make_float2(0.0f, 0.0f)), theta_phi_delta(make_float2(0.0f, 0.0f)), theta_phi_inv_delta(make_float2(0.0f, 0.0f)), candela_multiplier(0.0f), total_power(0.0f), eval_data(0) {} uint2 angular_resolution; // angular resolution of the grid float2 theta_phi_start; // start of the grid float2 theta_phi_delta; // angular step size float2 theta_phi_inv_delta; // inverse step size float candela_multiplier; // factor to rescale the normalized data float total_power; hipTextureObject_t eval_data; // normalized data sampled on grid float* cdf_data; // CDFs for sampling a light profile }; // The texture handler structure required by the MDL SDK with custom additional // fields. 
struct Texture_handler : MDL::Texture_handler_base { // additional data for the texture access functions can be provided here size_t num_textures; // the number of textures used by the material // (without the invalid texture) Texture const* textures; // the textures used by the material // (without the invalid texture) size_t num_mbsdfs; // the number of mbsdfs used by the material // (without the invalid mbsdf) Mbsdf const* mbsdfs; // the mbsdfs used by the material // (without the invalid mbsdf) size_t num_lightprofiles; // number of elements in the lightprofiles field // (without the invalid light profile) Lightprofile const* lightprofiles; // a device pointer to a list of mbsdfs objects, if used // (without the invalid light profile) }; DEVICE void bsdf_init(MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); DEVICE void bsdf_sample(MDL::Bsdf_sample_data* data, MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); DEVICE void bsdf_evaluate(MDL::Bsdf_evaluate_data<MDL::DF_HSM_NONE>* data, MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); DEVICE void bsdf_pdf(MDL::Bsdf_pdf_data* data, MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); // From examples/mdl_sdk/shared/texture_support_cuda.h // Stores a float4 in a float[4] array. INLINEDEVICE void store_result4(float res[4], const float4& v) { res[0] = v.x; res[1] = v.y; res[2] = v.z; res[3] = v.w; } // Stores a float in all elements of a float[4] array. INLINEDEVICE void store_result4(float res[4], float s) { res[0] = res[1] = res[2] = res[3] = s; } // Stores the given float values in a float[4] array. INLINEDEVICE void store_result4(float res[4], float v0, float v1, float v2, float v3) { res[0] = v0; res[1] = v1; res[2] = v2; res[3] = v3; } // Stores a float3 in a float[3] array. INLINEDEVICE void store_result3(float res[3], float3 const& v) { res[0] = v.x; res[1] = v.y; res[2] = v.z; } // Stores a float4 in a float[3] array, ignoring v.w. INLINEDEVICE void store_result3(float res[3], const float4& v) { res[0] = v.x; res[1] = v.y; res[2] = v.z; } // Stores a float in all elements of a float[3] array. INLINEDEVICE void store_result3(float res[3], float s) { res[0] = res[1] = res[2] = s; } // Stores the given float values in a float[3] array. INLINEDEVICE void store_result3(float res[3], float v0, float v1, float v2) { res[0] = v0; res[1] = v1; res[2] = v2; } // Stores the luminance if a given float[3] in a float. INLINEDEVICE void store_result1(float* res, float3 const& v) { // store luminance *res = 0.212671 * v.x + 0.715160 * v.y + 0.072169 * v.z; } // Stores the luminance if a given float[3] in a float. INLINEDEVICE void store_result1(float* res, float v0, float v1, float v2) { // store luminance *res = 0.212671 * v0 + 0.715160 * v1 + 0.072169 * v2; } // Stores a given float in a float INLINEDEVICE void store_result1(float* res, float s) { *res = s; } // ------------------------------------------------------------------------------------------------ // Textures // ------------------------------------------------------------------------------------------------ // Applies wrapping and cropping to the given coordinate. // Note: This macro returns if wrap mode is clip and the coordinate is out of // range. 
#define WRAP_AND_CROP_OR_RETURN_BLACK(val, inv_dim, wrap_mode, crop_vals, \ store_res_func) \ do { \ if((wrap_mode) == MDL::TEX_WRAP_REPEAT && (crop_vals)[0] == 0.0f && \ (crop_vals)[1] == 1.0f) { \ /* Do nothing, use texture sampler default behavior */ \ } else { \ if((wrap_mode) == MDL::TEX_WRAP_REPEAT) \ val = val - floorf(val); \ else { \ if((wrap_mode) == MDL::TEX_WRAP_CLIP && \ (val < 0.0f || val >= 1.0f)) { \ store_res_func(result, 0.0f); \ return; \ } else if((wrap_mode) == MDL::TEX_WRAP_MIRRORED_REPEAT) { \ float floored_val = floorf(val); \ if((int(floored_val) & 1) != 0) \ val = 1.0f - (val - floored_val); \ else \ val = val - floored_val; \ } \ float inv_hdim = 0.5f * (inv_dim); \ val = fminf(fmaxf(val, inv_hdim), 1.f - inv_hdim); \ } \ val = val * ((crop_vals)[1] - (crop_vals)[0]) + (crop_vals)[0]; \ } \ } while(0) #define USE_SMOOTHERSTEP_FILTER #ifdef USE_SMOOTHERSTEP_FILTER // Modify texture coordinates to get better texture filtering, // see http://www.iquilezles.org/www/articles/texture/texture.htm #define APPLY_SMOOTHERSTEP_FILTER() \ do { \ u = u * tex.size.x + 0.5f; \ v = v * tex.size.y + 0.5f; \ \ float u_i = floorf(u), v_i = floorf(v); \ float u_f = u - u_i; \ float v_f = v - v_i; \ u_f = u_f * u_f * u_f * (u_f * (u_f * 6.f - 15.f) + 10.f); \ v_f = v_f * v_f * v_f * (v_f * (v_f * 6.f - 15.f) + 10.f); \ u = u_i + u_f; \ v = v_i + v_f; \ \ u = (u - 0.5f) * tex.inv_size.x; \ v = (v - 0.5f) * tex.inv_size.y; \ } while(0) #else #define APPLY_SMOOTHERSTEP_FILTER() #endif // Implementation of tex::lookup_float4() for a texture_2d texture. DEVICE void tex_lookup_float4_2d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[2], Tex_wrap_mode const wrap_u, Tex_wrap_mode const wrap_v, float const crop_u[2], float const crop_v[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result4); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result4); APPLY_SMOOTHERSTEP_FILTER(); store_result4(result, tex2D<float4>(tex.filtered_object, u, v)); } // Implementation of tex::lookup_float3() for a texture_2d texture. DEVICE void tex_lookup_float3_2d(float result[3], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[2], Tex_wrap_mode const wrap_u, Tex_wrap_mode const wrap_v, float const crop_u[2], float const crop_v[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result3(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result3); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result3); APPLY_SMOOTHERSTEP_FILTER(); store_result3(result, tex2D<float4>(tex.filtered_object, u, v)); } // Implementation of tex::texel_float4() for a texture_2d texture. 
// Note: uvtile textures are not supported DEVICE void tex_texel_float4_2d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, int const coord[2], int const /*uv_tile*/[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result4(result, tex2D<float4>(tex.unfiltered_object, float(coord[0]) * tex.inv_size.x, float(coord[1]) * tex.inv_size.y)); } // Implementation of tex::lookup_float4() for a texture_3d texture. DEVICE void tex_lookup_float4_3d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3], Tex_wrap_mode wrap_u, Tex_wrap_mode wrap_v, Tex_wrap_mode wrap_w, float const crop_u[2], float const crop_v[2], float const crop_w[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1], w = coord[2]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result4); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result4); WRAP_AND_CROP_OR_RETURN_BLACK(w, tex.inv_size.z, wrap_w, crop_w, store_result4); store_result4(result, tex3D<float4>(tex.filtered_object, u, v, w)); } // Implementation of tex::lookup_float3() for a texture_3d texture. DEVICE void tex_lookup_float3_3d(float result[3], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3], Tex_wrap_mode wrap_u, Tex_wrap_mode wrap_v, Tex_wrap_mode wrap_w, float const crop_u[2], float const crop_v[2], float const crop_w[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result3(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1], w = coord[2]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result3); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result3); WRAP_AND_CROP_OR_RETURN_BLACK(w, tex.inv_size.z, wrap_w, crop_w, store_result3); store_result3(result, tex3D<float4>(tex.filtered_object, u, v, w)); } // Implementation of tex::texel_float4() for a texture_3d texture. DEVICE void tex_texel_float4_3d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, const int coord[3]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result4(result, tex3D<float4>(tex.unfiltered_object, float(coord[0]) * tex.inv_size.x, float(coord[1]) * tex.inv_size.y, float(coord[2]) * tex.inv_size.z)); } // Implementation of tex::lookup_float4() for a texture_cube texture. 
DEVICE void tex_lookup_float4_cube(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result4( result, texCubemap<float4>(tex.filtered_object, coord[0], coord[1], coord[2])); } // Implementation of tex::lookup_float3() for a texture_cube texture. DEVICE void tex_lookup_float3_cube(float result[3], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result3(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result3( result, texCubemap<float4>(tex.filtered_object, coord[0], coord[1], coord[2])); } // Implementation of resolution_2d function needed by generated code. // Note: uvtile textures are not supported DEVICE void tex_resolution_2d(int result[2], Texture_handler_base const* self_base, unsigned texture_idx, int const /*uv_tile*/[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero result[0] = 0; result[1] = 0; return; } Texture const& tex = self->textures[texture_idx - 1]; result[0] = tex.size.x; result[1] = tex.size.y; } // Implementation of resolution_3d function needed by generated code. // Note: 3d textures are not supported DEVICE void tex_resolution_3d(int result[3], Texture_handler_base const* self_base, unsigned texture_idx) { // invalid texture returns zero result[0] = 0; result[1] = 0; result[2] = 0; } // Implementation of texture_isvalid(). DEVICE bool tex_texture_isvalid(Texture_handler_base const* self_base, unsigned texture_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); return texture_idx != 0 && texture_idx - 1 < self->num_textures; } // ------------------------------------------------------------------------------------------------ // Light Profiles // ------------------------------------------------------------------------------------------------ // Implementation of light_profile_power() for a light profile. DEVICE float df_light_profile_power(Texture_handler_base const* self_base, unsigned light_profile_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; return lp.total_power; } // Implementation of light_profile_maximum() for a light profile. DEVICE float df_light_profile_maximum(Texture_handler_base const* self_base, unsigned light_profile_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; return lp.candela_multiplier; } // Implementation of light_profile_isvalid() for a light profile. 
DEVICE bool df_light_profile_isvalid(Texture_handler_base const* self_base, unsigned light_profile_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); return light_profile_idx != 0 && light_profile_idx - 1 < self->num_lightprofiles; } // binary search through CDF INLINEDEVICE unsigned sample_cdf(const float* cdf, unsigned cdf_size, float xi) { unsigned li = 0; unsigned ri = cdf_size - 1; unsigned m = (li + ri) / 2; while(ri > li) { if(xi < cdf[m]) ri = m; else li = m + 1; m = (li + ri) / 2; } return m; } // Implementation of df::light_profile_evaluate() for a light profile. DEVICE float df_light_profile_evaluate(Texture_handler_base const* self_base, unsigned light_profile_idx, float const theta_phi[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; // map theta to 0..1 range const float u = (theta_phi[0] - lp.theta_phi_start.x) * lp.theta_phi_inv_delta.x / float(lp.angular_resolution.x - 1); // converting input phi from -pi..pi to 0..2pi float phi = (theta_phi[1] > 0.0f) ? theta_phi[1] : (float(2.0 * M_PI) + theta_phi[1]); // floorf wraps phi range into 0..2pi phi = phi - lp.theta_phi_start.y - floorf((phi - lp.theta_phi_start.y) * float(0.5 / M_PI)) * float(2.0 * M_PI); // (phi < 0.0f) is no problem, this is handle by the (black) border // since it implies lp.theta_phi_start.y > 0 (and we really have "no data" // below that) const float v = phi * lp.theta_phi_inv_delta.y / float(lp.angular_resolution.y - 1); // wrap_mode: border black would be an alternative (but it produces // artifacts at low res) if(u < 0.0f || u > 1.0f || v < 0.0f || v > 1.0f) return 0.0f; return tex2D<float>(lp.eval_data, u, v) * lp.candela_multiplier; } // Implementation of df::light_profile_sample() for a light profile. 
DEVICE void df_light_profile_sample(float result[3], // output: theta, phi, pdf Texture_handler_base const* self_base, unsigned light_profile_idx, float const xi[3]) // uniform random values { result[0] = -1.0f; // negative theta means no emission result[1] = -1.0f; result[2] = 0.0f; Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; uint2 res = lp.angular_resolution; // sample theta_out //------------------------------------------- float xi0 = xi[0]; const float* cdf_data_theta = lp.cdf_data; // CDF theta unsigned idx_theta = sample_cdf(cdf_data_theta, res.x - 1, xi0); // binary search float prob_theta = cdf_data_theta[idx_theta]; if(idx_theta > 0) { const float tmp = cdf_data_theta[idx_theta - 1]; prob_theta -= tmp; xi0 -= tmp; } xi0 /= prob_theta; // rescale for re-usage // sample phi_out //------------------------------------------- float xi1 = xi[1]; const float* cdf_data_phi = cdf_data_theta + (res.x - 1) // CDF theta block + (idx_theta * (res.y - 1)); // selected CDF for phi const unsigned idx_phi = sample_cdf(cdf_data_phi, res.y - 1, xi1); // binary search float prob_phi = cdf_data_phi[idx_phi]; if(idx_phi > 0) { const float tmp = cdf_data_phi[idx_phi - 1]; prob_phi -= tmp; xi1 -= tmp; } xi1 /= prob_phi; // rescale for re-usage // compute theta and phi //------------------------------------------- // sample uniformly within the patch (grid cell) const float2 start = lp.theta_phi_start; const float2 delta = lp.theta_phi_delta; const float cos_theta_0 = cosf(start.x + float(idx_theta) * delta.x); const float cos_theta_1 = cosf(start.x + float(idx_theta + 1u) * delta.x); // n = \int_{\theta_0}^{\theta_1} \sin{\theta} \delta \theta // = 1 / (\cos{\theta_0} - \cos{\theta_1}) // // \xi = n * \int_{\theta_0}^{\theta_1} \sin{\theta} \delta // \theta // => \cos{\theta} = (1 - \xi) \cos{\theta_0} + \xi \cos{\theta_1} const float cos_theta = (1.0f - xi1) * cos_theta_0 + xi1 * cos_theta_1; result[0] = acosf(cos_theta); result[1] = start.y + (float(idx_phi) + xi0) * delta.y; // align phi if(result[1] > float(2.0 * M_PI)) result[1] -= float(2.0 * M_PI); // wrap if(result[1] > float(1.0 * M_PI)) result[1] = float(-2.0 * M_PI) + result[1]; // to [-pi, pi] // compute pdf //------------------------------------------- result[2] = prob_theta * prob_phi / (delta.y * (cos_theta_0 - cos_theta_1)); } // Implementation of df::light_profile_pdf() for a light profile. DEVICE float df_light_profile_pdf(Texture_handler_base const* self_base, unsigned light_profile_idx, float const theta_phi[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; // CDF data const uint2 res = lp.angular_resolution; const float* cdf_data_theta = lp.cdf_data; // map theta to 0..1 range const float theta = theta_phi[0] - lp.theta_phi_start.x; const int idx_theta = int(theta * lp.theta_phi_inv_delta.x); // converting input phi from -pi..pi to 0..2pi float phi = (theta_phi[1] > 0.0f) ? 
theta_phi[1] : (float(2.0 * M_PI) + theta_phi[1]); // floorf wraps phi range into 0..2pi phi = phi - lp.theta_phi_start.y - floorf((phi - lp.theta_phi_start.y) * float(0.5 / M_PI)) * float(2.0 * M_PI); // (phi < 0.0f) is no problem, this is handle by the (black) border // since it implies lp.theta_phi_start.y > 0 (and we really have "no data" // below that) const int idx_phi = int(phi * lp.theta_phi_inv_delta.y); // wrap_mode: border black would be an alternative (but it produces // artifacts at low res) if(idx_theta < 0 || idx_theta > (res.x - 2) || idx_phi < 0 || idx_phi > (res.x - 2)) return 0.0f; // get probability for theta //------------------------------------------- float prob_theta = cdf_data_theta[idx_theta]; if(idx_theta > 0) { const float tmp = cdf_data_theta[idx_theta - 1]; prob_theta -= tmp; } // get probability for phi //------------------------------------------- const float* cdf_data_phi = cdf_data_theta + (res.x - 1) // CDF theta block + (idx_theta * (res.y - 1)); // selected CDF for phi float prob_phi = cdf_data_phi[idx_phi]; if(idx_phi > 0) { const float tmp = cdf_data_phi[idx_phi - 1]; prob_phi -= tmp; } // compute probability to select a position in the sphere patch const float2 start = lp.theta_phi_start; const float2 delta = lp.theta_phi_delta; const float cos_theta_0 = cos(start.x + float(idx_theta) * delta.x); const float cos_theta_1 = cos(start.x + float(idx_theta + 1u) * delta.x); return prob_theta * prob_phi / (delta.y * (cos_theta_0 - cos_theta_1)); } // ------------------------------------------------------------------------------------------------ // BSDF Measurements // ------------------------------------------------------------------------------------------------ // Implementation of bsdf_measurement_isvalid() for an MBSDF. DEVICE bool df_bsdf_measurement_isvalid(Texture_handler_base const* self_base, unsigned bsdf_measurement_index) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); return bsdf_measurement_index != 0 && bsdf_measurement_index - 1 < self->num_mbsdfs; } // Implementation of df::bsdf_measurement_resolution() function needed by // generated code, which retrieves the angular and chromatic resolution of the // given MBSDF. The returned triple consists of: number of equi-spaced steps of // theta_i and theta_o, number of equi-spaced steps of phi, and number of color // channels (1 or 3). 
DEVICE void df_bsdf_measurement_resolution( unsigned result[3], Texture_handler_base const* self_base, unsigned bsdf_measurement_index, Mbsdf_part part) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) { // invalid MBSDF returns zero result[0] = 0; result[1] = 0; result[2] = 0; return; } Mbsdf const& bm = self->mbsdfs[bsdf_measurement_index - 1]; const unsigned part_index = static_cast<unsigned>(part); // check for the part if(bm.has_data[part_index] == 0) { result[0] = 0; result[1] = 0; result[2] = 0; return; } // pass out the information result[0] = bm.angular_resolution[part_index].x; result[1] = bm.angular_resolution[part_index].y; result[2] = bm.num_channels[part_index]; } INLINEDEVICE float3 bsdf_compute_uvw(const float theta_phi_in[2], const float theta_phi_out[2]) { // assuming each phi is between -pi and pi float u = theta_phi_out[1] - theta_phi_in[1]; if(u < 0.0) u += float(2.0 * M_PI); if(u > float(1.0 * M_PI)) u = float(2.0 * M_PI) - u; u *= M_ONE_OVER_PI; const float v = theta_phi_out[0] * float(2.0 / M_PI); const float w = theta_phi_in[0] * float(2.0 / M_PI); return make_float3(u, v, w); } template <typename T> INLINEDEVICE T bsdf_measurement_lookup(const hipTextureObject_t& eval_volume, const float theta_phi_in[2], const float theta_phi_out[2]) { // 3D volume on the GPU (phi_delta x theta_out x theta_in) const float3 uvw = bsdf_compute_uvw(theta_phi_in, theta_phi_out); return tex3D<T>(eval_volume, uvw.x, uvw.y, uvw.z); } // Implementation of df::bsdf_measurement_evaluate() for an MBSDF. DEVICE void df_bsdf_measurement_evaluate(float result[3], Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi_in[2], float const theta_phi_out[2], Mbsdf_part part) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) { // invalid MBSDF returns zero store_result3(result, 0.0f); return; } const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; const unsigned part_index = static_cast<unsigned>(part); // check for the parta if(bm.has_data[part_index] == 0) { store_result3(result, 0.0f); return; } // handle channels if(bm.num_channels[part_index] == 3) { const float4 sample = bsdf_measurement_lookup<float4>( bm.eval_data[part_index], theta_phi_in, theta_phi_out); store_result3(result, sample.x, sample.y, sample.z); } else { const float sample = bsdf_measurement_lookup<float>( bm.eval_data[part_index], theta_phi_in, theta_phi_out); store_result3(result, sample); } } // Implementation of df::bsdf_measurement_sample() for an MBSDF. 
DEVICE void df_bsdf_measurement_sample(float result[3], // output: theta, phi, pdf Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi_out[2], float const xi[3], // uniform random values Mbsdf_part part) { result[0] = -1.0f; // negative theta means absorption result[1] = -1.0f; result[2] = 0.0f; Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) return; // invalid MBSDFs returns zero const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; unsigned part_index = static_cast<unsigned>(part); if(bm.has_data[part_index] == 0) return; // check for the part // CDF data uint2 res = bm.angular_resolution[part_index]; const float* sample_data = bm.sample_data[part_index]; // compute the theta_in index (flipping input and output, BSDFs are // symmetric) unsigned idx_theta_in = unsigned(theta_phi_out[0] * M_ONE_OVER_PI * 2.0f * float(res.x)); idx_theta_in = min(idx_theta_in, res.x - 1); // sample theta_out //------------------------------------------- float xi0 = xi[0]; const float* cdf_theta = sample_data + idx_theta_in * res.x; unsigned idx_theta_out = sample_cdf(cdf_theta, res.x, xi0); // binary search float prob_theta = cdf_theta[idx_theta_out]; if(idx_theta_out > 0) { const float tmp = cdf_theta[idx_theta_out - 1]; prob_theta -= tmp; xi0 -= tmp; } xi0 /= prob_theta; // rescale for re-usage // sample phi_out //------------------------------------------- float xi1 = xi[1]; const float* cdf_phi = sample_data + (res.x * res.x) + // CDF theta block (idx_theta_in * res.x + idx_theta_out) * res.y; // selected CDF phi // select which half-circle to choose with probability 0.5 const bool flip = (xi1 > 0.5f); if(flip) xi1 = 1.0f - xi1; xi1 *= 2.0f; unsigned idx_phi_out = sample_cdf(cdf_phi, res.y, xi1); // binary search float prob_phi = cdf_phi[idx_phi_out]; if(idx_phi_out > 0) { const float tmp = cdf_phi[idx_phi_out - 1]; prob_phi -= tmp; xi1 -= tmp; } xi1 /= prob_phi; // rescale for re-usage // compute theta and phi out //------------------------------------------- const float2 inv_res = bm.inv_angular_resolution[part_index]; const float s_theta = float(0.5 * M_PI) * inv_res.x; const float s_phi = float(1.0 * M_PI) * inv_res.y; const float cos_theta_0 = cosf(float(idx_theta_out) * s_theta); const float cos_theta_1 = cosf(float(idx_theta_out + 1u) * s_theta); const float cos_theta = cos_theta_0 * (1.0f - xi1) + cos_theta_1 * xi1; result[0] = acosf(cos_theta); result[1] = (float(idx_phi_out) + xi0) * s_phi; if(flip) result[1] = float(2.0 * M_PI) - result[1]; // phi \in [0, 2pi] // align phi result[1] += (theta_phi_out[1] > 0) ? theta_phi_out[1] : (float(2.0 * M_PI) + theta_phi_out[1]); if(result[1] > float(2.0 * M_PI)) result[1] -= float(2.0 * M_PI); if(result[1] > float(1.0 * M_PI)) result[1] = float(-2.0 * M_PI) + result[1]; // to [-pi, pi] // compute pdf //------------------------------------------- result[2] = prob_theta * prob_phi * 0.5f / (s_phi * (cos_theta_0 - cos_theta_1)); } // Implementation of df::bsdf_measurement_pdf() for an MBSDF. 
DEVICE float df_bsdf_measurement_pdf(Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi_in[2], float const theta_phi_out[2], Mbsdf_part part) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) return 0.0f; // invalid MBSDF returns zero const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; unsigned part_index = static_cast<unsigned>(part); // check for the part if(bm.has_data[part_index] == 0) return 0.0f; // CDF data and resolution const float* sample_data = bm.sample_data[part_index]; uint2 res = bm.angular_resolution[part_index]; // compute indices in the CDF data float3 uvw = bsdf_compute_uvw( theta_phi_in, theta_phi_out); // phi_delta, theta_out, theta_in unsigned idx_theta_in = unsigned(theta_phi_in[0] * M_ONE_OVER_PI * 2.0f * float(res.x)); unsigned idx_theta_out = unsigned(theta_phi_out[0] * M_ONE_OVER_PI * 2.0f * float(res.x)); unsigned idx_phi_out = unsigned(uvw.x * float(res.y)); idx_theta_in = min(idx_theta_in, res.x - 1); idx_theta_out = min(idx_theta_out, res.x - 1); idx_phi_out = min(idx_phi_out, res.y - 1); // get probability to select theta_out const float* cdf_theta = sample_data + idx_theta_in * res.x; float prob_theta = cdf_theta[idx_theta_out]; if(idx_theta_out > 0) { const float tmp = cdf_theta[idx_theta_out - 1]; prob_theta -= tmp; } // get probability to select phi_out const float* cdf_phi = sample_data + (res.x * res.x) + // CDF theta block (idx_theta_in * res.x + idx_theta_out) * res.y; // selected CDF phi float prob_phi = cdf_phi[idx_phi_out]; if(idx_phi_out > 0) { const float tmp = cdf_phi[idx_phi_out - 1]; prob_phi -= tmp; } // compute probability to select a position in the sphere patch float2 inv_res = bm.inv_angular_resolution[part_index]; const float s_theta = float(0.5 * M_PI) * inv_res.x; const float s_phi = float(1.0 * M_PI) * inv_res.y; const float cos_theta_0 = cosf(float(idx_theta_out) * s_theta); const float cos_theta_1 = cosf(float(idx_theta_out + 1u) * s_theta); return prob_theta * prob_phi * 0.5f / (s_phi * (cos_theta_0 - cos_theta_1)); } INLINEDEVICE void df_bsdf_measurement_albedo(float result[2], // output: max (in case of color) // albedo for the selected // direction ([0]) and global ([1]) Texture_handler const* self, unsigned bsdf_measurement_index, float const theta_phi[2], Mbsdf_part part) { const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; const unsigned part_index = static_cast<unsigned>(part); // check for the part if(bm.has_data[part_index] == 0) return; const uint2 res = bm.angular_resolution[part_index]; unsigned idx_theta = unsigned(theta_phi[0] * float(2.0 / M_PI) * float(res.x)); idx_theta = min(idx_theta, res.x - 1u); result[0] = bm.albedo_data[part_index][idx_theta]; result[1] = bm.max_albedo[part_index]; } // Implementation of df::bsdf_measurement_albedos() for an MBSDF. DEVICE void df_bsdf_measurement_albedos( float result[4], // output: [0] albedo refl. for theta_phi // [1] max albedo refl. global // [2] albedo trans. for theta_phi // [3] max albedo trans. 
global Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi[2]) { result[0] = 0.0f; result[1] = 0.0f; result[2] = 0.0f; result[3] = 0.0f; Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) return; // invalid MBSDF returns zero df_bsdf_measurement_albedo(&result[0], self, bsdf_measurement_index, theta_phi, MDL::MBSDF_DATA_REFLECTION); df_bsdf_measurement_albedo(&result[2], self, bsdf_measurement_index, theta_phi, MDL::MBSDF_DATA_TRANSMISSION); } struct Target_code_data { size_t num_textures; // number of elements in the textures field hipDeviceptr_t textures; // a device pointer to a list of Texture objects, if used size_t num_mbsdfs; // number of elements in the mbsdfs field hipDeviceptr_t mbsdfs; // a device pointer to a list of mbsdfs objects, if used size_t num_lightprofiles; // number of elements in the lightprofiles field hipDeviceptr_t lightprofiles; // a device pointer to a list of mbsdfs objects, if used hipDeviceptr_t ro_data_segment; // a device pointer to the read-only data // segment, if used }; DEVICE MDL::tct_float3 v2fmdl(const Vec3& vec) { return { vec.x, vec.y, vec.z }; } DEVICE Vec3 f2vmdl(const MDL::tct_float3& vec) { return { vec.x, vec.y, vec.z }; } DEVICE void __continuation_callable__sample(Payload* payload, Vec3 dir, Vec3 hit, Vec3 ng, Vec3 ns, Vec2 texCoord, float rayTime, bool front) { auto data = getSBTData<DataDesc>(); const Target_code_data* resource = reinterpret_cast<Target_code_data*>(data->resource); SamplerContext& sampler = *payload->sampler; MDL::Shading_state_material mat; mat.normal = v2f(ns); mat.geom_normal = v2f(ng); mat.position = v2f(hit); mat.animation_time = rayTime; MDL::tct_float3 tex; tex.x = texCoord.x, tex.y = texCoord.y, tex.z = 0.0f; mat.text_coords = &tex; // TODO:texCoords // fake tangent float3 tu, tv; { Vec3 bx = { 1.0f, 0.0f, 0.0f }, by = { 0.0f, 1.0f, 0.0f }; Vec3 t = glm::normalize(fabs(ns.x) < fabs(ns.y) ? 
glm::cross(ns, bx) : glm::cross(ns, by)); Vec3 bt = glm::cross(t, ns); tu = v2f(t), tv = v2f(bt); } mat.tangent_u = &tu; // TODO:tangent mat.tangent_v = &tv; mat.text_results = nullptr; // TODO:reserve text_results mat.ro_data_segment = reinterpret_cast<char*>(resource->ro_data_segment); mat.world_to_object = nullptr; mat.object_to_world = nullptr; // TODO:transform mat.object_id = 0; // TODO:instance Texture_handler handler; handler.lightprofiles = reinterpret_cast<Lightprofile*>(resource->lightprofiles); handler.num_lightprofiles = resource->num_lightprofiles; handler.mbsdfs = reinterpret_cast<Mbsdf*>(resource->mbsdfs); handler.num_mbsdfs = resource->num_mbsdfs; handler.textures = reinterpret_cast<Texture*>(resource->textures); handler.num_textures = resource->num_textures; MDL::Resource_data resData; resData.shared_data = nullptr; resData.texture_handler = &handler; bsdf_init(&mat, &resData, nullptr, data->argData); Vec3 offset = ng * 0.001f; // TODO:check offset // sample incoming direction { MDL::Bsdf_sample_data sampleF; if(front) { sampleF.ior1 = { 1.0f, 1.0f, 1.0f }; sampleF.ior2.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; } else { sampleF.ior1.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; sampleF.ior2 = { 1.0f, 1.0f, 1.0f }; } sampleF.k1 = v2fmdl(-dir); sampleF.xi = { sampler(), sampler(), sampler(), sampler() }; bsdf_sample(&sampleF, &mat, &resData, nullptr, data->argData); payload->f = f2vmdl(sampleF.bsdf_over_pdf); payload->wi = f2vmdl(sampleF.k2); payload->ori = hit + (sampleF.event_type & MDL::BSDF_EVENT_TRANSMISSION ? -offset : offset); payload->hit = true; } // sample light { MDL::Bsdf_evaluate_data<MDL::DF_HSM_NONE> evalF; if(front) { evalF.ior1 = { 1.0f, 1.0f, 1.0f }; evalF.ior2.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; } else { evalF.ior1.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; evalF.ior2 = { 1.0f, 1.0f, 1.0f }; } LightSample ls = sampleOneLight(hit + (front ? offset : -offset), rayTime, sampler); evalF.k1 = v2f(-dir); evalF.k2 = v2f(ls.wi); bsdf_evaluate(&evalF, &mat, &resData, nullptr, data->argData); payload->rad = ls.rad * (f2vmdl(evalF.bsdf_diffuse) /*+ f2vmdl(evalF.bsdf_glossy)*/); // TODO:light importance sampling } } void check(BuiltinMaterialSampleFunction = __continuation_callable__sample);
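The light-profile functions in the file above (sample_cdf, df_light_profile_sample, df_light_profile_pdf) revolve around two small pieces: a binary search over a normalized CDF and the solid-angle pdf of the selected grid cell, prob / (delta_phi * (cos(theta_0) - cos(theta_1))). The following is a minimal standalone sketch of both, compiled separately from the handler; the toy CDF values, angles, and the names pick_bin/patch_pdf are illustrative only.

#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

// Binary search for the first CDF bin whose value exceeds xi
// (same idea as sample_cdf in the handler above).
__host__ __device__ unsigned pick_bin(const float* cdf, unsigned n, float xi)
{
    unsigned lo = 0, hi = n - 1;
    while (hi > lo) {
        unsigned mid = (lo + hi) / 2;
        if (xi < cdf[mid]) hi = mid; else lo = mid + 1;
    }
    return lo;
}

// pdf of picking a direction inside grid cell (idx_theta, idx_phi):
// the discrete probability of the cell divided by its solid angle.
__host__ __device__ float patch_pdf(float prob_theta, float prob_phi,
                                    float theta_0, float theta_1, float delta_phi)
{
    return prob_theta * prob_phi / (delta_phi * (cosf(theta_0) - cosf(theta_1)));
}

int main()
{
    const float cdf[4] = { 0.1f, 0.4f, 0.8f, 1.0f };   // toy normalized CDF
    const unsigned bin = pick_bin(cdf, 4, 0.5f);        // -> 2
    const float prob   = cdf[bin] - (bin ? cdf[bin - 1] : 0.0f);
    printf("bin %u, prob %.2f, pdf %.3f\n",
           bin, prob, patch_pdf(prob, 1.0f, 0.2f, 0.3f, 0.1f));
    return 0;
}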
968ed45d4b745a30d6c1067b7d24fdd17a0bef8a.cu
#include "../../Shared/KernelShared.hpp" #include "DataDesc.hpp" #pragma warning(push, 0) #include <mi/mdl_sdk.h> #include <vector_functions.hpp> #pragma warning(pop) namespace MDL = mi::neuraylib; // From examples/mdl_sdk/shared/texture_support_cuda.h #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #define M_ONE_OVER_PI 0.318309886183790671538 using MDL::Mbsdf_part; using MDL::Tex_wrap_mode; using MDL::Texture_handler_base; // Custom structure representing an MDL texture, containing filtered and // unfiltered CUDA texture objects and the size of the texture. struct Texture final { CUtexObject filtered_object; // uses filter mode cudaFilterModeLinear CUtexObject unfiltered_object; // uses filter mode cudaFilterModePoint uint3 size; // size of the texture, needed for texel access float3 inv_size; // the inverse values of the size of the texture }; // Custom structure representing an MDL BSDF measurement. struct Mbsdf { unsigned has_data[2]; // true if there is a measurement for this part CUtexObject eval_data[2]; // uses filter mode cudaFilterModeLinear float max_albedo[2]; // max albedo used to limit the multiplier float* sample_data[2]; // CDFs for sampling a BSDF measurement float* albedo_data[2]; // max albedo for each theta (isotropic) uint2 angular_resolution[2]; // size of the dataset, needed for texel access float2 inv_angular_resolution[2]; // the inverse values of the size of the // dataset unsigned num_channels[2]; // number of color channels (1 or 3) }; // Structure representing a Light Profile struct Lightprofile { __device__ explicit Lightprofile() : angular_resolution(make_uint2(0, 0)), theta_phi_start(make_float2(0.0f, 0.0f)), theta_phi_delta(make_float2(0.0f, 0.0f)), theta_phi_inv_delta(make_float2(0.0f, 0.0f)), candela_multiplier(0.0f), total_power(0.0f), eval_data(0) {} uint2 angular_resolution; // angular resolution of the grid float2 theta_phi_start; // start of the grid float2 theta_phi_delta; // angular step size float2 theta_phi_inv_delta; // inverse step size float candela_multiplier; // factor to rescale the normalized data float total_power; CUtexObject eval_data; // normalized data sampled on grid float* cdf_data; // CDFs for sampling a light profile }; // The texture handler structure required by the MDL SDK with custom additional // fields. 
struct Texture_handler : MDL::Texture_handler_base { // additional data for the texture access functions can be provided here size_t num_textures; // the number of textures used by the material // (without the invalid texture) Texture const* textures; // the textures used by the material // (without the invalid texture) size_t num_mbsdfs; // the number of mbsdfs used by the material // (without the invalid mbsdf) Mbsdf const* mbsdfs; // the mbsdfs used by the material // (without the invalid mbsdf) size_t num_lightprofiles; // number of elements in the lightprofiles field // (without the invalid light profile) Lightprofile const* lightprofiles; // a device pointer to a list of mbsdfs objects, if used // (without the invalid light profile) }; DEVICE void bsdf_init(MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); DEVICE void bsdf_sample(MDL::Bsdf_sample_data* data, MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); DEVICE void bsdf_evaluate(MDL::Bsdf_evaluate_data<MDL::DF_HSM_NONE>* data, MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); DEVICE void bsdf_pdf(MDL::Bsdf_pdf_data* data, MDL::Shading_state_material* state, const MDL::Resource_data* res_data, const void* exception_state, const char* arg_block_data); // From examples/mdl_sdk/shared/texture_support_cuda.h // Stores a float4 in a float[4] array. INLINEDEVICE void store_result4(float res[4], const float4& v) { res[0] = v.x; res[1] = v.y; res[2] = v.z; res[3] = v.w; } // Stores a float in all elements of a float[4] array. INLINEDEVICE void store_result4(float res[4], float s) { res[0] = res[1] = res[2] = res[3] = s; } // Stores the given float values in a float[4] array. INLINEDEVICE void store_result4(float res[4], float v0, float v1, float v2, float v3) { res[0] = v0; res[1] = v1; res[2] = v2; res[3] = v3; } // Stores a float3 in a float[3] array. INLINEDEVICE void store_result3(float res[3], float3 const& v) { res[0] = v.x; res[1] = v.y; res[2] = v.z; } // Stores a float4 in a float[3] array, ignoring v.w. INLINEDEVICE void store_result3(float res[3], const float4& v) { res[0] = v.x; res[1] = v.y; res[2] = v.z; } // Stores a float in all elements of a float[3] array. INLINEDEVICE void store_result3(float res[3], float s) { res[0] = res[1] = res[2] = s; } // Stores the given float values in a float[3] array. INLINEDEVICE void store_result3(float res[3], float v0, float v1, float v2) { res[0] = v0; res[1] = v1; res[2] = v2; } // Stores the luminance if a given float[3] in a float. INLINEDEVICE void store_result1(float* res, float3 const& v) { // store luminance *res = 0.212671 * v.x + 0.715160 * v.y + 0.072169 * v.z; } // Stores the luminance if a given float[3] in a float. INLINEDEVICE void store_result1(float* res, float v0, float v1, float v2) { // store luminance *res = 0.212671 * v0 + 0.715160 * v1 + 0.072169 * v2; } // Stores a given float in a float INLINEDEVICE void store_result1(float* res, float s) { *res = s; } // ------------------------------------------------------------------------------------------------ // Textures // ------------------------------------------------------------------------------------------------ // Applies wrapping and cropping to the given coordinate. // Note: This macro returns if wrap mode is clip and the coordinate is out of // range. 
#define WRAP_AND_CROP_OR_RETURN_BLACK(val, inv_dim, wrap_mode, crop_vals, \ store_res_func) \ do { \ if((wrap_mode) == MDL::TEX_WRAP_REPEAT && (crop_vals)[0] == 0.0f && \ (crop_vals)[1] == 1.0f) { \ /* Do nothing, use texture sampler default behavior */ \ } else { \ if((wrap_mode) == MDL::TEX_WRAP_REPEAT) \ val = val - floorf(val); \ else { \ if((wrap_mode) == MDL::TEX_WRAP_CLIP && \ (val < 0.0f || val >= 1.0f)) { \ store_res_func(result, 0.0f); \ return; \ } else if((wrap_mode) == MDL::TEX_WRAP_MIRRORED_REPEAT) { \ float floored_val = floorf(val); \ if((int(floored_val) & 1) != 0) \ val = 1.0f - (val - floored_val); \ else \ val = val - floored_val; \ } \ float inv_hdim = 0.5f * (inv_dim); \ val = fminf(fmaxf(val, inv_hdim), 1.f - inv_hdim); \ } \ val = val * ((crop_vals)[1] - (crop_vals)[0]) + (crop_vals)[0]; \ } \ } while(0) #define USE_SMOOTHERSTEP_FILTER #ifdef USE_SMOOTHERSTEP_FILTER // Modify texture coordinates to get better texture filtering, // see http://www.iquilezles.org/www/articles/texture/texture.htm #define APPLY_SMOOTHERSTEP_FILTER() \ do { \ u = u * tex.size.x + 0.5f; \ v = v * tex.size.y + 0.5f; \ \ float u_i = floorf(u), v_i = floorf(v); \ float u_f = u - u_i; \ float v_f = v - v_i; \ u_f = u_f * u_f * u_f * (u_f * (u_f * 6.f - 15.f) + 10.f); \ v_f = v_f * v_f * v_f * (v_f * (v_f * 6.f - 15.f) + 10.f); \ u = u_i + u_f; \ v = v_i + v_f; \ \ u = (u - 0.5f) * tex.inv_size.x; \ v = (v - 0.5f) * tex.inv_size.y; \ } while(0) #else #define APPLY_SMOOTHERSTEP_FILTER() #endif // Implementation of tex::lookup_float4() for a texture_2d texture. DEVICE void tex_lookup_float4_2d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[2], Tex_wrap_mode const wrap_u, Tex_wrap_mode const wrap_v, float const crop_u[2], float const crop_v[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result4); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result4); APPLY_SMOOTHERSTEP_FILTER(); store_result4(result, tex2D<float4>(tex.filtered_object, u, v)); } // Implementation of tex::lookup_float3() for a texture_2d texture. DEVICE void tex_lookup_float3_2d(float result[3], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[2], Tex_wrap_mode const wrap_u, Tex_wrap_mode const wrap_v, float const crop_u[2], float const crop_v[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result3(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result3); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result3); APPLY_SMOOTHERSTEP_FILTER(); store_result3(result, tex2D<float4>(tex.filtered_object, u, v)); } // Implementation of tex::texel_float4() for a texture_2d texture. 
// Note: uvtile textures are not supported DEVICE void tex_texel_float4_2d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, int const coord[2], int const /*uv_tile*/[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result4(result, tex2D<float4>(tex.unfiltered_object, float(coord[0]) * tex.inv_size.x, float(coord[1]) * tex.inv_size.y)); } // Implementation of tex::lookup_float4() for a texture_3d texture. DEVICE void tex_lookup_float4_3d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3], Tex_wrap_mode wrap_u, Tex_wrap_mode wrap_v, Tex_wrap_mode wrap_w, float const crop_u[2], float const crop_v[2], float const crop_w[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1], w = coord[2]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result4); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result4); WRAP_AND_CROP_OR_RETURN_BLACK(w, tex.inv_size.z, wrap_w, crop_w, store_result4); store_result4(result, tex3D<float4>(tex.filtered_object, u, v, w)); } // Implementation of tex::lookup_float3() for a texture_3d texture. DEVICE void tex_lookup_float3_3d(float result[3], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3], Tex_wrap_mode wrap_u, Tex_wrap_mode wrap_v, Tex_wrap_mode wrap_w, float const crop_u[2], float const crop_v[2], float const crop_w[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result3(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; float u = coord[0], v = coord[1], w = coord[2]; WRAP_AND_CROP_OR_RETURN_BLACK(u, tex.inv_size.x, wrap_u, crop_u, store_result3); WRAP_AND_CROP_OR_RETURN_BLACK(v, tex.inv_size.y, wrap_v, crop_v, store_result3); WRAP_AND_CROP_OR_RETURN_BLACK(w, tex.inv_size.z, wrap_w, crop_w, store_result3); store_result3(result, tex3D<float4>(tex.filtered_object, u, v, w)); } // Implementation of tex::texel_float4() for a texture_3d texture. DEVICE void tex_texel_float4_3d(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, const int coord[3]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result4(result, tex3D<float4>(tex.unfiltered_object, float(coord[0]) * tex.inv_size.x, float(coord[1]) * tex.inv_size.y, float(coord[2]) * tex.inv_size.z)); } // Implementation of tex::lookup_float4() for a texture_cube texture. 
DEVICE void tex_lookup_float4_cube(float result[4], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result4(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result4( result, texCubemap<float4>(tex.filtered_object, coord[0], coord[1], coord[2])); } // Implementation of tex::lookup_float3() for a texture_cube texture. DEVICE void tex_lookup_float3_cube(float result[3], Texture_handler_base const* self_base, unsigned texture_idx, float const coord[3]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero store_result3(result, 0.0f); return; } Texture const& tex = self->textures[texture_idx - 1]; store_result3( result, texCubemap<float4>(tex.filtered_object, coord[0], coord[1], coord[2])); } // Implementation of resolution_2d function needed by generated code. // Note: uvtile textures are not supported DEVICE void tex_resolution_2d(int result[2], Texture_handler_base const* self_base, unsigned texture_idx, int const /*uv_tile*/[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(texture_idx == 0 || texture_idx - 1 >= self->num_textures) { // invalid texture returns zero result[0] = 0; result[1] = 0; return; } Texture const& tex = self->textures[texture_idx - 1]; result[0] = tex.size.x; result[1] = tex.size.y; } // Implementation of resolution_3d function needed by generated code. // Note: 3d textures are not supported DEVICE void tex_resolution_3d(int result[3], Texture_handler_base const* self_base, unsigned texture_idx) { // invalid texture returns zero result[0] = 0; result[1] = 0; result[2] = 0; } // Implementation of texture_isvalid(). DEVICE bool tex_texture_isvalid(Texture_handler_base const* self_base, unsigned texture_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); return texture_idx != 0 && texture_idx - 1 < self->num_textures; } // ------------------------------------------------------------------------------------------------ // Light Profiles // ------------------------------------------------------------------------------------------------ // Implementation of light_profile_power() for a light profile. DEVICE float df_light_profile_power(Texture_handler_base const* self_base, unsigned light_profile_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; return lp.total_power; } // Implementation of light_profile_maximum() for a light profile. DEVICE float df_light_profile_maximum(Texture_handler_base const* self_base, unsigned light_profile_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; return lp.candela_multiplier; } // Implementation of light_profile_isvalid() for a light profile. 
DEVICE bool df_light_profile_isvalid(Texture_handler_base const* self_base, unsigned light_profile_idx) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); return light_profile_idx != 0 && light_profile_idx - 1 < self->num_lightprofiles; } // binary search through CDF INLINEDEVICE unsigned sample_cdf(const float* cdf, unsigned cdf_size, float xi) { unsigned li = 0; unsigned ri = cdf_size - 1; unsigned m = (li + ri) / 2; while(ri > li) { if(xi < cdf[m]) ri = m; else li = m + 1; m = (li + ri) / 2; } return m; } // Implementation of df::light_profile_evaluate() for a light profile. DEVICE float df_light_profile_evaluate(Texture_handler_base const* self_base, unsigned light_profile_idx, float const theta_phi[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; // map theta to 0..1 range const float u = (theta_phi[0] - lp.theta_phi_start.x) * lp.theta_phi_inv_delta.x / float(lp.angular_resolution.x - 1); // converting input phi from -pi..pi to 0..2pi float phi = (theta_phi[1] > 0.0f) ? theta_phi[1] : (float(2.0 * M_PI) + theta_phi[1]); // floorf wraps phi range into 0..2pi phi = phi - lp.theta_phi_start.y - floorf((phi - lp.theta_phi_start.y) * float(0.5 / M_PI)) * float(2.0 * M_PI); // (phi < 0.0f) is no problem, this is handle by the (black) border // since it implies lp.theta_phi_start.y > 0 (and we really have "no data" // below that) const float v = phi * lp.theta_phi_inv_delta.y / float(lp.angular_resolution.y - 1); // wrap_mode: border black would be an alternative (but it produces // artifacts at low res) if(u < 0.0f || u > 1.0f || v < 0.0f || v > 1.0f) return 0.0f; return tex2D<float>(lp.eval_data, u, v) * lp.candela_multiplier; } // Implementation of df::light_profile_sample() for a light profile. 
DEVICE void df_light_profile_sample(float result[3], // output: theta, phi, pdf Texture_handler_base const* self_base, unsigned light_profile_idx, float const xi[3]) // uniform random values { result[0] = -1.0f; // negative theta means no emission result[1] = -1.0f; result[2] = 0.0f; Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; uint2 res = lp.angular_resolution; // sample theta_out //------------------------------------------- float xi0 = xi[0]; const float* cdf_data_theta = lp.cdf_data; // CDF theta unsigned idx_theta = sample_cdf(cdf_data_theta, res.x - 1, xi0); // binary search float prob_theta = cdf_data_theta[idx_theta]; if(idx_theta > 0) { const float tmp = cdf_data_theta[idx_theta - 1]; prob_theta -= tmp; xi0 -= tmp; } xi0 /= prob_theta; // rescale for re-usage // sample phi_out //------------------------------------------- float xi1 = xi[1]; const float* cdf_data_phi = cdf_data_theta + (res.x - 1) // CDF theta block + (idx_theta * (res.y - 1)); // selected CDF for phi const unsigned idx_phi = sample_cdf(cdf_data_phi, res.y - 1, xi1); // binary search float prob_phi = cdf_data_phi[idx_phi]; if(idx_phi > 0) { const float tmp = cdf_data_phi[idx_phi - 1]; prob_phi -= tmp; xi1 -= tmp; } xi1 /= prob_phi; // rescale for re-usage // compute theta and phi //------------------------------------------- // sample uniformly within the patch (grid cell) const float2 start = lp.theta_phi_start; const float2 delta = lp.theta_phi_delta; const float cos_theta_0 = cosf(start.x + float(idx_theta) * delta.x); const float cos_theta_1 = cosf(start.x + float(idx_theta + 1u) * delta.x); // n = \int_{\theta_0}^{\theta_1} \sin{\theta} \delta \theta // = 1 / (\cos{\theta_0} - \cos{\theta_1}) // // \xi = n * \int_{\theta_0}^{\theta_1} \sin{\theta} \delta // \theta // => \cos{\theta} = (1 - \xi) \cos{\theta_0} + \xi \cos{\theta_1} const float cos_theta = (1.0f - xi1) * cos_theta_0 + xi1 * cos_theta_1; result[0] = acosf(cos_theta); result[1] = start.y + (float(idx_phi) + xi0) * delta.y; // align phi if(result[1] > float(2.0 * M_PI)) result[1] -= float(2.0 * M_PI); // wrap if(result[1] > float(1.0 * M_PI)) result[1] = float(-2.0 * M_PI) + result[1]; // to [-pi, pi] // compute pdf //------------------------------------------- result[2] = prob_theta * prob_phi / (delta.y * (cos_theta_0 - cos_theta_1)); } // Implementation of df::light_profile_pdf() for a light profile. DEVICE float df_light_profile_pdf(Texture_handler_base const* self_base, unsigned light_profile_idx, float const theta_phi[2]) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(light_profile_idx == 0 || light_profile_idx - 1 >= self->num_lightprofiles) return 0.0f; // invalid light profile returns zero const Lightprofile& lp = self->lightprofiles[light_profile_idx - 1]; // CDF data const uint2 res = lp.angular_resolution; const float* cdf_data_theta = lp.cdf_data; // map theta to 0..1 range const float theta = theta_phi[0] - lp.theta_phi_start.x; const int idx_theta = int(theta * lp.theta_phi_inv_delta.x); // converting input phi from -pi..pi to 0..2pi float phi = (theta_phi[1] > 0.0f) ? 
theta_phi[1] : (float(2.0 * M_PI) + theta_phi[1]); // floorf wraps phi range into 0..2pi phi = phi - lp.theta_phi_start.y - floorf((phi - lp.theta_phi_start.y) * float(0.5 / M_PI)) * float(2.0 * M_PI); // (phi < 0.0f) is no problem, this is handle by the (black) border // since it implies lp.theta_phi_start.y > 0 (and we really have "no data" // below that) const int idx_phi = int(phi * lp.theta_phi_inv_delta.y); // wrap_mode: border black would be an alternative (but it produces // artifacts at low res) if(idx_theta < 0 || idx_theta > (res.x - 2) || idx_phi < 0 || idx_phi > (res.x - 2)) return 0.0f; // get probability for theta //------------------------------------------- float prob_theta = cdf_data_theta[idx_theta]; if(idx_theta > 0) { const float tmp = cdf_data_theta[idx_theta - 1]; prob_theta -= tmp; } // get probability for phi //------------------------------------------- const float* cdf_data_phi = cdf_data_theta + (res.x - 1) // CDF theta block + (idx_theta * (res.y - 1)); // selected CDF for phi float prob_phi = cdf_data_phi[idx_phi]; if(idx_phi > 0) { const float tmp = cdf_data_phi[idx_phi - 1]; prob_phi -= tmp; } // compute probability to select a position in the sphere patch const float2 start = lp.theta_phi_start; const float2 delta = lp.theta_phi_delta; const float cos_theta_0 = cos(start.x + float(idx_theta) * delta.x); const float cos_theta_1 = cos(start.x + float(idx_theta + 1u) * delta.x); return prob_theta * prob_phi / (delta.y * (cos_theta_0 - cos_theta_1)); } // ------------------------------------------------------------------------------------------------ // BSDF Measurements // ------------------------------------------------------------------------------------------------ // Implementation of bsdf_measurement_isvalid() for an MBSDF. DEVICE bool df_bsdf_measurement_isvalid(Texture_handler_base const* self_base, unsigned bsdf_measurement_index) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); return bsdf_measurement_index != 0 && bsdf_measurement_index - 1 < self->num_mbsdfs; } // Implementation of df::bsdf_measurement_resolution() function needed by // generated code, which retrieves the angular and chromatic resolution of the // given MBSDF. The returned triple consists of: number of equi-spaced steps of // theta_i and theta_o, number of equi-spaced steps of phi, and number of color // channels (1 or 3). 
DEVICE void df_bsdf_measurement_resolution( unsigned result[3], Texture_handler_base const* self_base, unsigned bsdf_measurement_index, Mbsdf_part part) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) { // invalid MBSDF returns zero result[0] = 0; result[1] = 0; result[2] = 0; return; } Mbsdf const& bm = self->mbsdfs[bsdf_measurement_index - 1]; const unsigned part_index = static_cast<unsigned>(part); // check for the part if(bm.has_data[part_index] == 0) { result[0] = 0; result[1] = 0; result[2] = 0; return; } // pass out the information result[0] = bm.angular_resolution[part_index].x; result[1] = bm.angular_resolution[part_index].y; result[2] = bm.num_channels[part_index]; } INLINEDEVICE float3 bsdf_compute_uvw(const float theta_phi_in[2], const float theta_phi_out[2]) { // assuming each phi is between -pi and pi float u = theta_phi_out[1] - theta_phi_in[1]; if(u < 0.0) u += float(2.0 * M_PI); if(u > float(1.0 * M_PI)) u = float(2.0 * M_PI) - u; u *= M_ONE_OVER_PI; const float v = theta_phi_out[0] * float(2.0 / M_PI); const float w = theta_phi_in[0] * float(2.0 / M_PI); return make_float3(u, v, w); } template <typename T> INLINEDEVICE T bsdf_measurement_lookup(const cudaTextureObject_t& eval_volume, const float theta_phi_in[2], const float theta_phi_out[2]) { // 3D volume on the GPU (phi_delta x theta_out x theta_in) const float3 uvw = bsdf_compute_uvw(theta_phi_in, theta_phi_out); return tex3D<T>(eval_volume, uvw.x, uvw.y, uvw.z); } // Implementation of df::bsdf_measurement_evaluate() for an MBSDF. DEVICE void df_bsdf_measurement_evaluate(float result[3], Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi_in[2], float const theta_phi_out[2], Mbsdf_part part) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) { // invalid MBSDF returns zero store_result3(result, 0.0f); return; } const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; const unsigned part_index = static_cast<unsigned>(part); // check for the parta if(bm.has_data[part_index] == 0) { store_result3(result, 0.0f); return; } // handle channels if(bm.num_channels[part_index] == 3) { const float4 sample = bsdf_measurement_lookup<float4>( bm.eval_data[part_index], theta_phi_in, theta_phi_out); store_result3(result, sample.x, sample.y, sample.z); } else { const float sample = bsdf_measurement_lookup<float>( bm.eval_data[part_index], theta_phi_in, theta_phi_out); store_result3(result, sample); } } // Implementation of df::bsdf_measurement_sample() for an MBSDF. 
DEVICE void df_bsdf_measurement_sample(float result[3], // output: theta, phi, pdf Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi_out[2], float const xi[3], // uniform random values Mbsdf_part part) { result[0] = -1.0f; // negative theta means absorption result[1] = -1.0f; result[2] = 0.0f; Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) return; // invalid MBSDFs returns zero const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; unsigned part_index = static_cast<unsigned>(part); if(bm.has_data[part_index] == 0) return; // check for the part // CDF data uint2 res = bm.angular_resolution[part_index]; const float* sample_data = bm.sample_data[part_index]; // compute the theta_in index (flipping input and output, BSDFs are // symmetric) unsigned idx_theta_in = unsigned(theta_phi_out[0] * M_ONE_OVER_PI * 2.0f * float(res.x)); idx_theta_in = min(idx_theta_in, res.x - 1); // sample theta_out //------------------------------------------- float xi0 = xi[0]; const float* cdf_theta = sample_data + idx_theta_in * res.x; unsigned idx_theta_out = sample_cdf(cdf_theta, res.x, xi0); // binary search float prob_theta = cdf_theta[idx_theta_out]; if(idx_theta_out > 0) { const float tmp = cdf_theta[idx_theta_out - 1]; prob_theta -= tmp; xi0 -= tmp; } xi0 /= prob_theta; // rescale for re-usage // sample phi_out //------------------------------------------- float xi1 = xi[1]; const float* cdf_phi = sample_data + (res.x * res.x) + // CDF theta block (idx_theta_in * res.x + idx_theta_out) * res.y; // selected CDF phi // select which half-circle to choose with probability 0.5 const bool flip = (xi1 > 0.5f); if(flip) xi1 = 1.0f - xi1; xi1 *= 2.0f; unsigned idx_phi_out = sample_cdf(cdf_phi, res.y, xi1); // binary search float prob_phi = cdf_phi[idx_phi_out]; if(idx_phi_out > 0) { const float tmp = cdf_phi[idx_phi_out - 1]; prob_phi -= tmp; xi1 -= tmp; } xi1 /= prob_phi; // rescale for re-usage // compute theta and phi out //------------------------------------------- const float2 inv_res = bm.inv_angular_resolution[part_index]; const float s_theta = float(0.5 * M_PI) * inv_res.x; const float s_phi = float(1.0 * M_PI) * inv_res.y; const float cos_theta_0 = cosf(float(idx_theta_out) * s_theta); const float cos_theta_1 = cosf(float(idx_theta_out + 1u) * s_theta); const float cos_theta = cos_theta_0 * (1.0f - xi1) + cos_theta_1 * xi1; result[0] = acosf(cos_theta); result[1] = (float(idx_phi_out) + xi0) * s_phi; if(flip) result[1] = float(2.0 * M_PI) - result[1]; // phi \in [0, 2pi] // align phi result[1] += (theta_phi_out[1] > 0) ? theta_phi_out[1] : (float(2.0 * M_PI) + theta_phi_out[1]); if(result[1] > float(2.0 * M_PI)) result[1] -= float(2.0 * M_PI); if(result[1] > float(1.0 * M_PI)) result[1] = float(-2.0 * M_PI) + result[1]; // to [-pi, pi] // compute pdf //------------------------------------------- result[2] = prob_theta * prob_phi * 0.5f / (s_phi * (cos_theta_0 - cos_theta_1)); } // Implementation of df::bsdf_measurement_pdf() for an MBSDF. 
DEVICE float df_bsdf_measurement_pdf(Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi_in[2], float const theta_phi_out[2], Mbsdf_part part) { Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) return 0.0f; // invalid MBSDF returns zero const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; unsigned part_index = static_cast<unsigned>(part); // check for the part if(bm.has_data[part_index] == 0) return 0.0f; // CDF data and resolution const float* sample_data = bm.sample_data[part_index]; uint2 res = bm.angular_resolution[part_index]; // compute indices in the CDF data float3 uvw = bsdf_compute_uvw( theta_phi_in, theta_phi_out); // phi_delta, theta_out, theta_in unsigned idx_theta_in = unsigned(theta_phi_in[0] * M_ONE_OVER_PI * 2.0f * float(res.x)); unsigned idx_theta_out = unsigned(theta_phi_out[0] * M_ONE_OVER_PI * 2.0f * float(res.x)); unsigned idx_phi_out = unsigned(uvw.x * float(res.y)); idx_theta_in = min(idx_theta_in, res.x - 1); idx_theta_out = min(idx_theta_out, res.x - 1); idx_phi_out = min(idx_phi_out, res.y - 1); // get probability to select theta_out const float* cdf_theta = sample_data + idx_theta_in * res.x; float prob_theta = cdf_theta[idx_theta_out]; if(idx_theta_out > 0) { const float tmp = cdf_theta[idx_theta_out - 1]; prob_theta -= tmp; } // get probability to select phi_out const float* cdf_phi = sample_data + (res.x * res.x) + // CDF theta block (idx_theta_in * res.x + idx_theta_out) * res.y; // selected CDF phi float prob_phi = cdf_phi[idx_phi_out]; if(idx_phi_out > 0) { const float tmp = cdf_phi[idx_phi_out - 1]; prob_phi -= tmp; } // compute probability to select a position in the sphere patch float2 inv_res = bm.inv_angular_resolution[part_index]; const float s_theta = float(0.5 * M_PI) * inv_res.x; const float s_phi = float(1.0 * M_PI) * inv_res.y; const float cos_theta_0 = cosf(float(idx_theta_out) * s_theta); const float cos_theta_1 = cosf(float(idx_theta_out + 1u) * s_theta); return prob_theta * prob_phi * 0.5f / (s_phi * (cos_theta_0 - cos_theta_1)); } INLINEDEVICE void df_bsdf_measurement_albedo(float result[2], // output: max (in case of color) // albedo for the selected // direction ([0]) and global ([1]) Texture_handler const* self, unsigned bsdf_measurement_index, float const theta_phi[2], Mbsdf_part part) { const Mbsdf& bm = self->mbsdfs[bsdf_measurement_index - 1]; const unsigned part_index = static_cast<unsigned>(part); // check for the part if(bm.has_data[part_index] == 0) return; const uint2 res = bm.angular_resolution[part_index]; unsigned idx_theta = unsigned(theta_phi[0] * float(2.0 / M_PI) * float(res.x)); idx_theta = min(idx_theta, res.x - 1u); result[0] = bm.albedo_data[part_index][idx_theta]; result[1] = bm.max_albedo[part_index]; } // Implementation of df::bsdf_measurement_albedos() for an MBSDF. DEVICE void df_bsdf_measurement_albedos( float result[4], // output: [0] albedo refl. for theta_phi // [1] max albedo refl. global // [2] albedo trans. for theta_phi // [3] max albedo trans. 
global Texture_handler_base const* self_base, unsigned bsdf_measurement_index, float const theta_phi[2]) { result[0] = 0.0f; result[1] = 0.0f; result[2] = 0.0f; result[3] = 0.0f; Texture_handler const* self = static_cast<Texture_handler const*>(self_base); if(bsdf_measurement_index == 0 || bsdf_measurement_index - 1 >= self->num_mbsdfs) return; // invalid MBSDF returns zero df_bsdf_measurement_albedo(&result[0], self, bsdf_measurement_index, theta_phi, MDL::MBSDF_DATA_REFLECTION); df_bsdf_measurement_albedo(&result[2], self, bsdf_measurement_index, theta_phi, MDL::MBSDF_DATA_TRANSMISSION); } struct Target_code_data { size_t num_textures; // number of elements in the textures field CUdeviceptr textures; // a device pointer to a list of Texture objects, if used size_t num_mbsdfs; // number of elements in the mbsdfs field CUdeviceptr mbsdfs; // a device pointer to a list of mbsdfs objects, if used size_t num_lightprofiles; // number of elements in the lightprofiles field CUdeviceptr lightprofiles; // a device pointer to a list of mbsdfs objects, if used CUdeviceptr ro_data_segment; // a device pointer to the read-only data // segment, if used }; DEVICE MDL::tct_float3 v2fmdl(const Vec3& vec) { return { vec.x, vec.y, vec.z }; } DEVICE Vec3 f2vmdl(const MDL::tct_float3& vec) { return { vec.x, vec.y, vec.z }; } DEVICE void __continuation_callable__sample(Payload* payload, Vec3 dir, Vec3 hit, Vec3 ng, Vec3 ns, Vec2 texCoord, float rayTime, bool front) { auto data = getSBTData<DataDesc>(); const Target_code_data* resource = reinterpret_cast<Target_code_data*>(data->resource); SamplerContext& sampler = *payload->sampler; MDL::Shading_state_material mat; mat.normal = v2f(ns); mat.geom_normal = v2f(ng); mat.position = v2f(hit); mat.animation_time = rayTime; MDL::tct_float3 tex; tex.x = texCoord.x, tex.y = texCoord.y, tex.z = 0.0f; mat.text_coords = &tex; // TODO:texCoords // fake tangent float3 tu, tv; { Vec3 bx = { 1.0f, 0.0f, 0.0f }, by = { 0.0f, 1.0f, 0.0f }; Vec3 t = glm::normalize(fabs(ns.x) < fabs(ns.y) ? 
glm::cross(ns, bx) : glm::cross(ns, by)); Vec3 bt = glm::cross(t, ns); tu = v2f(t), tv = v2f(bt); } mat.tangent_u = &tu; // TODO:tangent mat.tangent_v = &tv; mat.text_results = nullptr; // TODO:reserve text_results mat.ro_data_segment = reinterpret_cast<char*>(resource->ro_data_segment); mat.world_to_object = nullptr; mat.object_to_world = nullptr; // TODO:transform mat.object_id = 0; // TODO:instance Texture_handler handler; handler.lightprofiles = reinterpret_cast<Lightprofile*>(resource->lightprofiles); handler.num_lightprofiles = resource->num_lightprofiles; handler.mbsdfs = reinterpret_cast<Mbsdf*>(resource->mbsdfs); handler.num_mbsdfs = resource->num_mbsdfs; handler.textures = reinterpret_cast<Texture*>(resource->textures); handler.num_textures = resource->num_textures; MDL::Resource_data resData; resData.shared_data = nullptr; resData.texture_handler = &handler; bsdf_init(&mat, &resData, nullptr, data->argData); Vec3 offset = ng * 0.001f; // TODO:check offset // sample incoming direction { MDL::Bsdf_sample_data sampleF; if(front) { sampleF.ior1 = { 1.0f, 1.0f, 1.0f }; sampleF.ior2.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; } else { sampleF.ior1.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; sampleF.ior2 = { 1.0f, 1.0f, 1.0f }; } sampleF.k1 = v2fmdl(-dir); sampleF.xi = { sampler(), sampler(), sampler(), sampler() }; bsdf_sample(&sampleF, &mat, &resData, nullptr, data->argData); payload->f = f2vmdl(sampleF.bsdf_over_pdf); payload->wi = f2vmdl(sampleF.k2); payload->ori = hit + (sampleF.event_type & MDL::BSDF_EVENT_TRANSMISSION ? -offset : offset); payload->hit = true; } // sample light { MDL::Bsdf_evaluate_data<MDL::DF_HSM_NONE> evalF; if(front) { evalF.ior1 = { 1.0f, 1.0f, 1.0f }; evalF.ior2.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; } else { evalF.ior1.x = MI_NEURAYLIB_BSDF_USE_MATERIAL_IOR; evalF.ior2 = { 1.0f, 1.0f, 1.0f }; } LightSample ls = sampleOneLight(hit + (front ? offset : -offset), rayTime, sampler); evalF.k1 = v2f(-dir); evalF.k2 = v2f(ls.wi); bsdf_evaluate(&evalF, &mat, &resData, nullptr, data->argData); payload->rad = ls.rad * (f2vmdl(evalF.bsdf_diffuse) /*+ f2vmdl(evalF.bsdf_glossy)*/); // TODO:light importance sampling } } void check(BuiltinMaterialSampleFunction = __continuation_callable__sample);
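The offset arithmetic in df_bsdf_measurement_sample and df_bsdf_measurement_pdf above implies a packed layout for Mbsdf::sample_data: first res.x CDFs over theta_out (one row of length res.x per theta_in), then one CDF over phi of length res.y per (theta_in, theta_out) pair. The sketch below only replays that index arithmetic; the resolutions are made-up values and the layout itself is an inference from the offsets in the code, not a documented guarantee.

#include <cstdio>

int main()
{
    const unsigned res_x = 4, res_y = 8;   // theta steps, phi steps (illustrative)

    // Block 1: res_x CDFs over theta_out, one per theta_in, each of length res_x.
    // Block 2: for every (theta_in, theta_out) pair, a CDF over phi of length res_y.
    const unsigned idx_theta_in = 2, idx_theta_out = 1;

    const unsigned theta_cdf_offset = idx_theta_in * res_x;
    const unsigned phi_cdf_offset =
        res_x * res_x + (idx_theta_in * res_x + idx_theta_out) * res_y;
    const unsigned total_floats = res_x * res_x + res_x * res_x * res_y;

    printf("theta CDF at %u, phi CDF at %u, total %u floats\n",
           theta_cdf_offset, phi_cdf_offset, total_floats);
    return 0;
}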
f2299d43afdc0ce7a46d3f2082289a19de096a9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by salmon on 16-7-25. // #include </usr/local/cuda/include/hip/hip_runtime_api.h> #include "../../../../../../../usr/local/cuda/include/device_launch_parameters.h" extern "C" { #include <assert.h> #include "spParallelCUDA.h" #include "../../spMPI.h" } int spParallelDeviceInitialize(int argc, char **argv) { int num_of_device = 0; SP_DEVICE_CALL(hipGetDeviceCount(&num_of_device)); SP_DEVICE_CALL(hipSetDevice(spMPIRank() % num_of_device)); SP_DEVICE_CALL(hipDeviceSynchronize()); // Wait for the GPU launched work to complete SP_DEVICE_CALL(hipGetLastError()); // SP_DEVICE_CALL(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); return SP_SUCCESS; } int spParallelDeviceFinalize() { SP_DEVICE_CALL(hipDeviceReset()); return SP_SUCCESS; } int spMemoryDeviceAlloc(void **p, size_type s) { if (s > 0) {SP_DEVICE_CALL(hipMalloc(p, s)); } else { *p = NULL; } return SP_SUCCESS; } int spMemoryDeviceFree(void **_P_) { if (*_P_ != NULL) { SP_DEVICE_CALL(hipFree(*_P_)); *_P_ = NULL; } return SP_SUCCESS;; }; int spMemoryCopy(void *dest, void const *src, size_type s) { SP_DEVICE_CALL(hipMemcpy(dest, src, s, hipMemcpyDefault)); return SP_SUCCESS; } int spMemoryCopyToCache(const void *dest, void const *src, size_type s) { SP_DEVICE_CALL(hipMemcpyToSymbol(dest, src, s)); return SP_SUCCESS; } int spMemorySet(void *dest, int v, size_type s) { SP_DEVICE_CALL (hipMemset(dest, v, s)); return SP_SUCCESS; } int spMemoryHostAlloc(void **p, size_type s) { SP_DEVICE_CALL (hipHostMalloc(p, s, hipHostMallocDefault)); return SP_SUCCESS; }; int spMemoryHostFree(void **p) { if (*p != NULL) { SP_DEVICE_CALL(hipHostFree(*p)); *p = NULL; } return SP_SUCCESS; } __global__ void spParallelDeviceFillIntKernel(int *d, int v, size_type max) { for (size_t s = threadIdx.x + blockIdx.x * blockDim.x; s < max; s += gridDim.x * blockDim.x) { d[s] = v; } }; int spParallelDeviceFillInt(int *d, int v, size_type s) { SP_CALL_DEVICE_KERNEL(spParallelDeviceFillIntKernel, 16, 256, d, v, s); return SP_SUCCESS; }; __global__ void spParallelDeviceFillRealKernel(Real *d, Real v, size_type max) { for (size_type s = threadIdx.x + blockIdx.x * blockDim.x; s < max; s += gridDim.x * blockDim.x) { d[s] = v; } }; int spParallelDeviceFillReal(Real *d, Real v, size_type s) { SP_CALL_DEVICE_KERNEL(spParallelDeviceFillRealKernel, 16, 256, d, v, s); return SP_SUCCESS; }; __global__ void spParallelAssignKernel(size_type max, size_type const *offset, Real *d, Real const *v) { size_type num_of_thread = blockDim.x * gridDim.x * blockDim.x * gridDim.x * blockDim.x * gridDim.x; for (size_type s = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x + (threadIdx.x + blockIdx.x * blockDim.x) * blockDim.x * gridDim.x * blockDim.y * gridDim.y; s < max; s += num_of_thread) { d[offset[s]] = v[s]; } }; int spParallelAssign(size_type num_of_point, size_type *offset, Real *d, Real const *v) { SP_CALL_DEVICE_KERNEL(spParallelAssignKernel, 16, 256, num_of_point, offset, d, v); return SP_SUCCESS; }; // //int spMemoryDeviceToHost(void **p, void *src, size_type size_in_byte) //{ // int SP_SUCCESS = SP_SUCCESS; // SP_CALL(spMemoryHostAlloc(p, size_in_byte)); // SP_CALL(spMemoryCopy(*p, src, size_in_byte)); // return SP_SUCCESS; // //} // //int spMemoryHostFree(void **p) //{ // int SP_SUCCESS = SP_SUCCESS; // SP_CALL(spMemoryHostFree(p)); // return SP_SUCCESS; // //}
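spParallelAssignKernel above maps a 3D launch onto a linear element index before its grid-stride loop. A conventional way to write that mapping is shown below as a standalone sketch; it is written against the CUDA runtime (the HIP file above would use the equivalent hip* calls), and the kernel name, grid/block dimensions, and buffer size are illustrative only.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void linear_index_demo(unsigned long long* out, unsigned long long max)
{
    // global 3D thread coordinates
    const unsigned long long x = threadIdx.x + (unsigned long long)blockIdx.x * blockDim.x;
    const unsigned long long y = threadIdx.y + (unsigned long long)blockIdx.y * blockDim.y;
    const unsigned long long z = threadIdx.z + (unsigned long long)blockIdx.z * blockDim.z;

    const unsigned long long nx = (unsigned long long)blockDim.x * gridDim.x;
    const unsigned long long ny = (unsigned long long)blockDim.y * gridDim.y;
    const unsigned long long nz = (unsigned long long)blockDim.z * gridDim.z;

    unsigned long long s            = x + y * nx + z * nx * ny;  // flattened index
    const unsigned long long stride = nx * ny * nz;              // total thread count

    for (; s < max; s += stride)
        out[s] = s;   // stands in for d[offset[s]] = v[s] in the kernel above
}

int main()
{
    const unsigned long long n = 1ull << 12;
    unsigned long long* d = nullptr;
    cudaMalloc((void**)&d, n * sizeof(unsigned long long));
    linear_index_demo<<<dim3(4, 2, 2), dim3(8, 8, 4)>>>(d, n);
    cudaDeviceSynchronize();
    cudaFree(d);
    printf("done\n");
    return 0;
}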
f2299d43afdc0ce7a46d3f2082289a19de096a9a.cu
// // Created by salmon on 16-7-25. // #include </usr/local/cuda/include/cuda_runtime_api.h> #include "../../../../../../../usr/local/cuda/include/device_launch_parameters.h" extern "C" { #include <assert.h> #include "spParallelCUDA.h" #include "../../spMPI.h" } int spParallelDeviceInitialize(int argc, char **argv) { int num_of_device = 0; SP_DEVICE_CALL(cudaGetDeviceCount(&num_of_device)); SP_DEVICE_CALL(cudaSetDevice(spMPIRank() % num_of_device)); SP_DEVICE_CALL(cudaThreadSynchronize()); // Wait for the GPU launched work to complete SP_DEVICE_CALL(cudaGetLastError()); // SP_DEVICE_CALL(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); return SP_SUCCESS; } int spParallelDeviceFinalize() { SP_DEVICE_CALL(cudaDeviceReset()); return SP_SUCCESS; } int spMemoryDeviceAlloc(void **p, size_type s) { if (s > 0) {SP_DEVICE_CALL(cudaMalloc(p, s)); } else { *p = NULL; } return SP_SUCCESS; } int spMemoryDeviceFree(void **_P_) { if (*_P_ != NULL) { SP_DEVICE_CALL(cudaFree(*_P_)); *_P_ = NULL; } return SP_SUCCESS;; }; int spMemoryCopy(void *dest, void const *src, size_type s) { SP_DEVICE_CALL(cudaMemcpy(dest, src, s, cudaMemcpyDefault)); return SP_SUCCESS; } int spMemoryCopyToCache(const void *dest, void const *src, size_type s) { SP_DEVICE_CALL(cudaMemcpyToSymbol(dest, src, s)); return SP_SUCCESS; } int spMemorySet(void *dest, int v, size_type s) { SP_DEVICE_CALL (cudaMemset(dest, v, s)); return SP_SUCCESS; } int spMemoryHostAlloc(void **p, size_type s) { SP_DEVICE_CALL (cudaHostAlloc(p, s, cudaHostAllocDefault)); return SP_SUCCESS; }; int spMemoryHostFree(void **p) { if (*p != NULL) { SP_DEVICE_CALL(cudaFreeHost(*p)); *p = NULL; } return SP_SUCCESS; } __global__ void spParallelDeviceFillIntKernel(int *d, int v, size_type max) { for (size_t s = threadIdx.x + blockIdx.x * blockDim.x; s < max; s += gridDim.x * blockDim.x) { d[s] = v; } }; int spParallelDeviceFillInt(int *d, int v, size_type s) { SP_CALL_DEVICE_KERNEL(spParallelDeviceFillIntKernel, 16, 256, d, v, s); return SP_SUCCESS; }; __global__ void spParallelDeviceFillRealKernel(Real *d, Real v, size_type max) { for (size_type s = threadIdx.x + blockIdx.x * blockDim.x; s < max; s += gridDim.x * blockDim.x) { d[s] = v; } }; int spParallelDeviceFillReal(Real *d, Real v, size_type s) { SP_CALL_DEVICE_KERNEL(spParallelDeviceFillRealKernel, 16, 256, d, v, s); return SP_SUCCESS; }; __global__ void spParallelAssignKernel(size_type max, size_type const *offset, Real *d, Real const *v) { size_type num_of_thread = blockDim.x * gridDim.x * blockDim.x * gridDim.x * blockDim.x * gridDim.x; for (size_type s = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x + (threadIdx.x + blockIdx.x * blockDim.x) * blockDim.x * gridDim.x * blockDim.y * gridDim.y; s < max; s += num_of_thread) { d[offset[s]] = v[s]; } }; int spParallelAssign(size_type num_of_point, size_type *offset, Real *d, Real const *v) { SP_CALL_DEVICE_KERNEL(spParallelAssignKernel, 16, 256, num_of_point, offset, d, v); return SP_SUCCESS; }; // //int spMemoryDeviceToHost(void **p, void *src, size_type size_in_byte) //{ // int SP_SUCCESS = SP_SUCCESS; // SP_CALL(spMemoryHostAlloc(p, size_in_byte)); // SP_CALL(spMemoryCopy(*p, src, size_in_byte)); // return SP_SUCCESS; // //} // //int spMemoryHostFree(void **p) //{ // int SP_SUCCESS = SP_SUCCESS; // SP_CALL(spMemoryHostFree(p)); // return SP_SUCCESS; // //}
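The memory wrappers in the pair above lean on two runtime behaviours: cudaMemcpyDefault lets the runtime infer the copy direction from the pointer kinds, and cudaHostAlloc returns pinned host memory that transfers efficiently. A small end-to-end sketch of that combination follows; it bypasses the project's SP_DEVICE_CALL error-checking macro (defined elsewhere), and the buffer size and values are arbitrary.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const size_t n = 1 << 20;
    const size_t bytes = n * sizeof(float);

    float* host = nullptr;
    float* dev  = nullptr;
    cudaHostAlloc((void**)&host, bytes, cudaHostAllocDefault);   // pinned host buffer
    cudaMalloc((void**)&dev, bytes);

    for (size_t i = 0; i < n; ++i) host[i] = 1.0f;

    // direction inferred from the pointers, as in spMemoryCopy above
    cudaMemcpy(dev, host, bytes, cudaMemcpyDefault);
    cudaMemset(dev, 0, bytes);
    cudaMemcpy(host, dev, bytes, cudaMemcpyDefault);

    printf("host[0] after round trip: %f\n", host[0]);   // prints 0.000000

    cudaFree(dev);
    cudaFreeHost(host);
    return 0;
}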
5af44c439161e36af61a8a842ee2c8988fd667b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ #define block_size 128 // **===--------------------- Modify this function -----------------------===** //! @param g_data input data in global memory // result is expected in index 0 of g_data //! @param n input number of elements to reduce from input data // **===------------------------------------------------------------------===** __global__ void reduction(unsigned int *g_data, int n) { __shared__ unsigned int partialsum[2*block_size]; unsigned int t = threadIdx.x; unsigned int start = 2*blockDim.x*blockIdx.x; if(start + t < n) partialsum[t] = g_data[start + t]; else partialsum[t] = 0.0f; if(start + blockDim.x + t < n) partialsum[blockDim.x + t] = g_data[start + blockDim.x + t]; else partialsum[blockDim.x + t] = 0.0f; for(unsigned int stride = blockDim.x; stride >= 1; stride >>= 1) { __syncthreads(); if(t < stride) partialsum[t] += partialsum[t + stride]; } g_data[blockIdx.x] = partialsum[0]; //Worked //***********NOTE************************************************************************ //This method doesn't works because each thread accesses and updates g_data[0] parallelly. So each thread gets the initial value of g_data and the value returned is the value returned by the last thread. //if(blockIdx.x >= 0 && blockIdx.x < n/(2*block_size)) // g_data[0] += partialsum[0]; } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
5af44c439161e36af61a8a842ee2c8988fd667b7.cu
#ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ #define block_size 128 // **===--------------------- Modify this function -----------------------===** //! @param g_data input data in global memory // result is expected in index 0 of g_data //! @param n input number of elements to reduce from input data // **===------------------------------------------------------------------===** __global__ void reduction(unsigned int *g_data, int n) { __shared__ unsigned int partialsum[2*block_size]; unsigned int t = threadIdx.x; unsigned int start = 2*blockDim.x*blockIdx.x; if(start + t < n) partialsum[t] = g_data[start + t]; else partialsum[t] = 0.0f; if(start + blockDim.x + t < n) partialsum[blockDim.x + t] = g_data[start + blockDim.x + t]; else partialsum[blockDim.x + t] = 0.0f; for(unsigned int stride = blockDim.x; stride >= 1; stride >>= 1) { __syncthreads(); if(t < stride) partialsum[t] += partialsum[t + stride]; } g_data[blockIdx.x] = partialsum[0]; //Worked //***********NOTE************************************************************************ //This method doesn't works because each thread accesses and updates g_data[0] parallelly. So each thread gets the initial value of g_data and the value returned is the value returned by the last thread. //if(blockIdx.x >= 0 && blockIdx.x < n/(2*block_size)) // g_data[0] += partialsum[0]; } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
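For inputs that fit in a single block (up to 2*block_size elements) the kernel above is a complete reduction on its own and, as its header comment says, the result lands in g_data[0]; larger inputs leave one partial sum per block in g_data[0..gridDim.x-1], which then needs a second pass on the host or in another launch. The usage sketch below assumes the kernel and block_size are visible in the same translation unit; the host code and the input size are illustrative.

#include <cstdio>
#include <cuda_runtime.h>

// uses the reduction kernel and block_size defined in the file above

int main()
{
    const int n = 200;                                // <= 2 * block_size
    unsigned int h[n];
    unsigned int expected = 0;
    for (int i = 0; i < n; ++i) { h[i] = i; expected += i; }

    unsigned int* d = nullptr;
    cudaMalloc((void**)&d, n * sizeof(unsigned int));
    cudaMemcpy(d, h, n * sizeof(unsigned int), cudaMemcpyHostToDevice);

    // one block of block_size threads covers 2 * block_size inputs
    reduction<<<1, block_size>>>(d, n);

    unsigned int result = 0;
    cudaMemcpy(&result, d, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("sum = %u, expected = %u\n", result, expected);

    cudaFree(d);
    return 0;
}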
209e70efa877252086146487277b53b9550df9cb.hip
// !!! This is a file automatically generated by hipify!!! #include <GL\glew.h> #include <GL\glfw.h> #include <GL\freeglut.h> #include <GL\GL.h> #include <Windows.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "cuda_gl_interop.h" #include <SDL.h> #include <iostream> #include <string> #include <sstream> #include "cuda_renderer.cuh" #include "Vector3D.cuh" #include <vector> #include "Color.cuh" #include "Camera.cuh" #include "Matrix_hip.cuh" #include "IGeometry.cuh" #include "IShader.cuh" #include "Node.cuh" #include "Lambert.cuh" #include "Plane.cuh" #include "Sphere.cuh" #include "EventHandler.h" #include "Menu.h" #include "Settings.cuh" #include "RaytracerControls.cuh" using namespace std; extern "C" void cudaRenderer(uchar4* dev_vfb); extern "C" void freeDeviceMemory(); extern "C" void initScene(); extern "C" void cameraBeginFrame(); extern "C" void updateScene(const double& elapsedTime, const double& currentTime); unsigned frameCount; unsigned lastFrameEnd; unsigned lastTitleUpdateTime; unsigned lastTitleUpdateFrameCount; const char* const appName = "CUDA Traycer"; //* Data handles to the OpenGL buffer GLuint bufferObj; cudaGraphicsResource* resource; /** * @brief - render the scene using glDrawPixels() * and then swap the buffers using glfwSwapBuffers() */ static void glRenderScene() { glDrawPixels(GlobalSettings::RES_X, GlobalSettings::RES_Y, GL_RGBA, GL_UNSIGNED_BYTE, 0); glfwSwapBuffers(); } /** * @brief - Function that prints CUDA specs * of the GPU device/s on the console */ void printGPUSpecs() { hipDeviceProp_t prop; int count; hipGetDeviceCount(&count); for (int i = 0; i < count; ++i) { hipGetDeviceProperties( &prop, i ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) { printf( "Enabled\n" ); } else { printf( "Disabled\n"); } printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) { printf( "Enabled\n" ); } else { printf( "Disabled\n" ); } printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } } /** * @brief - Wrapper function that creates timer and captures the start and stop time * @param start - output - captures the start time * @param stop - output - captires the stop time */ void cudaStartTimer(hipEvent_t& start, hipEvent_t& stop) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); } /** * @brief - Wrapper function that takes the previously captured start and stop time * from cudaStartTimer() function, calculates the elapsed time, * 
prints it on the console and shows it on the window frame * @param start - the start time that is previously captured by cudaStartTimer() * @param stop - the stop time that is previously captured by cudaStartTimer() * @reference - cudaStartTimer(hipEvent_t& start, hipEvent_t& stop) */ void cudaStopTimer(hipEvent_t& start, hipEvent_t& stop) { hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf( "Time to render: %3.1f ms\n\n", elapsedTime); char info[128]; sprintf(info, "CUDA Traycer || Time to render: %3.1f ms", elapsedTime); glfwSetWindowTitle(info); hipEventDestroy(start); hipEventDestroy(stop); } /** * @brief - Calculate and display on the window frame * the mean frame time and the frames per second. * Using glfwGetTime() to register the time (in seconds). */ void displayFrameCounter() { ++frameCount; const unsigned now = glfwGetTime(); const unsigned frameTime = now - lastFrameEnd; const unsigned titleUpdateTimeDelta = now - lastTitleUpdateTime; if (titleUpdateTimeDelta > 1) { const unsigned framesDelta = frameCount - lastTitleUpdateFrameCount; const unsigned meanFrameTime = titleUpdateTimeDelta / framesDelta; const unsigned fps = framesDelta / titleUpdateTimeDelta; std::ostringstream title; title << appName << " :\t\t\t mean frame time: " << meanFrameTime << " ms || fps: " << fps; title.flush(); glfwSetWindowTitle(title.str().c_str()); lastTitleUpdateTime = now; lastTitleUpdateFrameCount = frameCount; } lastFrameEnd = glfwGetTime(); } /** * Set CUDA device (GPU) * Initialize GLFW and GLEW and open GLFW window * Generate and bind the buffer data */ void OpenGL_Setup(); /** * @brief - Perform a clenup that: * - Unregisters the CUDA recource * - Unbinds the OpenGL buffer * - Deletes the OpenGL buffer */ void Clean_OpenGL_and_CUDA(); int main(int argc, char** argv) { hipDeviceSetLimit(hipLimitStackSize, STACK_SIZE); Menu mainMenu(appName); mainMenu.Destroy(); printGPUSpecs(); EventHandler eventController; OpenGL_Setup(); //* framebuffer used by the GPU uchar4* dev_vfb; size_t size; initScene(); if (GlobalSettings::realTime) { double lastTime = glfwGetTime(); while (glfwGetWindowParam(GLFW_OPENED)) { double thisTime = glfwGetTime(); updateScene(thisTime - lastTime, thisTime); // notify CUDA runtime that we want to share bufferObj with resource (CUDA) hipGraphicsGLRegisterBuffer( &resource, bufferObj, hipGraphicsMapFlagsNone ); hipGraphicsMapResources( 1, &resource, NULL ); // map the addres of 'resource' to 'dev_vfb' hipGraphicsResourceGetMappedPointer( (void**)&dev_vfb, &size, resource); cameraBeginFrame(); displayFrameCounter(); cudaRenderer(dev_vfb); // unmap resource so the CUDA and OpenGL buffers can synchronisze hipGraphicsUnmapResources( 1, &resource, NULL ); eventController.handleEvents(); glfwSetKeyCallback(eventController.keyboardCallback); glfwSetMouseButtonCallback(eventController.mouseKeyCallback); lastTime = thisTime; glRenderScene(); } } else { hipGraphicsGLRegisterBuffer( &resource, bufferObj, hipGraphicsMapFlagsNone ); hipGraphicsMapResources( 1, &resource, NULL ); hipGraphicsResourceGetMappedPointer( (void**)&dev_vfb, &size, resource); hipEvent_t start, stop; cudaStartTimer(start, stop); cudaRenderer(dev_vfb); hipGraphicsUnmapResources( 1, &resource, NULL ); glRenderScene(); cudaStopTimer(start, stop); while (glfwGetWindowParam(GLFW_OPENED)) { eventController.handleUserInput(); glfwWaitEvents(); } } freeDeviceMemory(); Clean_OpenGL_and_CUDA(); glfwTerminate(); return EXIT_SUCCESS; } void 
OpenGL_Setup() { hipDeviceProp_t prop; int device; memset(&prop, 0, sizeof(hipDeviceProp_t)); prop.major = 1; prop.minor = 0; hipChooseDevice(&device, &prop); hipGLSetGLDevice(device); if (!glfwInit()) { std::cerr << "Could not initialize GLFW\n"; } // open a window with GLFW glfwOpenWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_COMPAT_PROFILE); //GLFW_OPENGL_CORE_PROFILE , GLFW_OPENGL_COMPAT_PROFILE glfwOpenWindowHint(GLFW_OPENGL_VERSION_MAJOR, 3); glfwOpenWindowHint(GLFW_OPENGL_VERSION_MINOR, 2); glfwOpenWindowHint(GLFW_WINDOW_NO_RESIZE, GL_TRUE); if(!glfwOpenWindow(GlobalSettings::RES_X, GlobalSettings::RES_Y, 8, 8, 8, 8, 16, 0, GlobalSettings::fullscreen ? GLFW_FULLSCREEN : GLFW_WINDOW)) { std::cerr << "glfwOpenWindow failed!\n"; } glewExperimental = GL_TRUE; if (glewInit() != GLEW_OK) { std::cerr << "Failed to initialize GLEW\n"; } while(glGetError() != GL_NO_ERROR) {} glGenBuffers(1, &bufferObj); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, GlobalSettings::RES_X * GlobalSettings::RES_Y * 4, NULL, GL_DYNAMIC_DRAW_ARB ); } void Clean_OpenGL_and_CUDA() { hipGraphicsUnregisterResource( resource ); glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, 0 ); glDeleteBuffers( 1, &bufferObj ); }
209e70efa877252086146487277b53b9550df9cb.cu
#include <GL\glew.h> #include <GL\glfw.h> #include <GL\freeglut.h> #include <GL\GL.h> #include <Windows.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #include "cuda_gl_interop.h" #include <SDL.h> #include <iostream> #include <string> #include <sstream> #include "cuda_renderer.cuh" #include "Vector3D.cuh" #include <vector> #include "Color.cuh" #include "Camera.cuh" #include "Matrix.cuh" #include "IGeometry.cuh" #include "IShader.cuh" #include "Node.cuh" #include "Lambert.cuh" #include "Plane.cuh" #include "Sphere.cuh" #include "EventHandler.h" #include "Menu.h" #include "Settings.cuh" #include "RaytracerControls.cuh" using namespace std; extern "C" void cudaRenderer(uchar4* dev_vfb); extern "C" void freeDeviceMemory(); extern "C" void initScene(); extern "C" void cameraBeginFrame(); extern "C" void updateScene(const double& elapsedTime, const double& currentTime); unsigned frameCount; unsigned lastFrameEnd; unsigned lastTitleUpdateTime; unsigned lastTitleUpdateFrameCount; const char* const appName = "CUDA Traycer"; //* Data handles to the OpenGL buffer GLuint bufferObj; cudaGraphicsResource* resource; /** * @brief - render the scene using glDrawPixels() * and then swap the buffers using glfwSwapBuffers() */ static void glRenderScene() { glDrawPixels(GlobalSettings::RES_X, GlobalSettings::RES_Y, GL_RGBA, GL_UNSIGNED_BYTE, 0); glfwSwapBuffers(); } /** * @brief - Function that prints CUDA specs * of the GPU device/s on the console */ void printGPUSpecs() { cudaDeviceProp prop; int count; cudaGetDeviceCount(&count); for (int i = 0; i < count; ++i) { cudaGetDeviceProperties( &prop, i ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) { printf( "Enabled\n" ); } else { printf( "Disabled\n"); } printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) { printf( "Enabled\n" ); } else { printf( "Disabled\n" ); } printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } } /** * @brief - Wrapper function that creates timer and captures the start and stop time * @param start - output - captures the start time * @param stop - output - captires the stop time */ void cudaStartTimer(cudaEvent_t& start, cudaEvent_t& stop) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); } /** * @brief - Wrapper function that takes the previously captured start and stop time * from cudaStartTimer() function, calculates the elapsed time, * prints it on the console and shows it on the window frame * @param start 
- the start time that is previously captured by cudaStartTimer() * @param stop - the stop time that is previously captured by cudaStartTimer() * @reference - cudaStartTimer(cudaEvent_t& start, cudaEvent_t& stop) */ void cudaStopTimer(cudaEvent_t& start, cudaEvent_t& stop) { cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf( "Time to render: %3.1f ms\n\n", elapsedTime); char info[128]; sprintf(info, "CUDA Traycer || Time to render: %3.1f ms", elapsedTime); glfwSetWindowTitle(info); cudaEventDestroy(start); cudaEventDestroy(stop); } /** * @brief - Calculate and display on the window frame * the mean frame time and the frames per second. * Using glfwGetTime() to register the time (in seconds). */ void displayFrameCounter() { ++frameCount; const unsigned now = glfwGetTime(); const unsigned frameTime = now - lastFrameEnd; const unsigned titleUpdateTimeDelta = now - lastTitleUpdateTime; if (titleUpdateTimeDelta > 1) { const unsigned framesDelta = frameCount - lastTitleUpdateFrameCount; const unsigned meanFrameTime = titleUpdateTimeDelta / framesDelta; const unsigned fps = framesDelta / titleUpdateTimeDelta; std::ostringstream title; title << appName << " :\t\t\t mean frame time: " << meanFrameTime << " ms || fps: " << fps; title.flush(); glfwSetWindowTitle(title.str().c_str()); lastTitleUpdateTime = now; lastTitleUpdateFrameCount = frameCount; } lastFrameEnd = glfwGetTime(); } /** * Set CUDA device (GPU) * Initialize GLFW and GLEW and open GLFW window * Generate and bind the buffer data */ void OpenGL_Setup(); /** * @brief - Perform a clenup that: * - Unregisters the CUDA recource * - Unbinds the OpenGL buffer * - Deletes the OpenGL buffer */ void Clean_OpenGL_and_CUDA(); int main(int argc, char** argv) { cudaDeviceSetLimit(cudaLimitStackSize, STACK_SIZE); Menu mainMenu(appName); mainMenu.Destroy(); printGPUSpecs(); EventHandler eventController; OpenGL_Setup(); //* framebuffer used by the GPU uchar4* dev_vfb; size_t size; initScene(); if (GlobalSettings::realTime) { double lastTime = glfwGetTime(); while (glfwGetWindowParam(GLFW_OPENED)) { double thisTime = glfwGetTime(); updateScene(thisTime - lastTime, thisTime); // notify CUDA runtime that we want to share bufferObj with resource (CUDA) cudaGraphicsGLRegisterBuffer( &resource, bufferObj, cudaGraphicsMapFlagsNone ); cudaGraphicsMapResources( 1, &resource, NULL ); // map the addres of 'resource' to 'dev_vfb' cudaGraphicsResourceGetMappedPointer( (void**)&dev_vfb, &size, resource); cameraBeginFrame(); displayFrameCounter(); cudaRenderer(dev_vfb); // unmap resource so the CUDA and OpenGL buffers can synchronisze cudaGraphicsUnmapResources( 1, &resource, NULL ); eventController.handleEvents(); glfwSetKeyCallback(eventController.keyboardCallback); glfwSetMouseButtonCallback(eventController.mouseKeyCallback); lastTime = thisTime; glRenderScene(); } } else { cudaGraphicsGLRegisterBuffer( &resource, bufferObj, cudaGraphicsMapFlagsNone ); cudaGraphicsMapResources( 1, &resource, NULL ); cudaGraphicsResourceGetMappedPointer( (void**)&dev_vfb, &size, resource); cudaEvent_t start, stop; cudaStartTimer(start, stop); cudaRenderer(dev_vfb); cudaGraphicsUnmapResources( 1, &resource, NULL ); glRenderScene(); cudaStopTimer(start, stop); while (glfwGetWindowParam(GLFW_OPENED)) { eventController.handleUserInput(); glfwWaitEvents(); } } freeDeviceMemory(); Clean_OpenGL_and_CUDA(); glfwTerminate(); return EXIT_SUCCESS; } void OpenGL_Setup() { cudaDeviceProp prop; int device; memset(&prop, 
0, sizeof(cudaDeviceProp)); prop.major = 1; prop.minor = 0; cudaChooseDevice(&device, &prop); cudaGLSetGLDevice(device); if (!glfwInit()) { std::cerr << "Could not initialize GLFW\n"; } // open a window with GLFW glfwOpenWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_COMPAT_PROFILE); //GLFW_OPENGL_CORE_PROFILE , GLFW_OPENGL_COMPAT_PROFILE glfwOpenWindowHint(GLFW_OPENGL_VERSION_MAJOR, 3); glfwOpenWindowHint(GLFW_OPENGL_VERSION_MINOR, 2); glfwOpenWindowHint(GLFW_WINDOW_NO_RESIZE, GL_TRUE); if(!glfwOpenWindow(GlobalSettings::RES_X, GlobalSettings::RES_Y, 8, 8, 8, 8, 16, 0, GlobalSettings::fullscreen ? GLFW_FULLSCREEN : GLFW_WINDOW)) { std::cerr << "glfwOpenWindow failed!\n"; } glewExperimental = GL_TRUE; if (glewInit() != GLEW_OK) { std::cerr << "Failed to initialize GLEW\n"; } while(glGetError() != GL_NO_ERROR) {} glGenBuffers(1, &bufferObj); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferObj); glBufferData( GL_PIXEL_UNPACK_BUFFER_ARB, GlobalSettings::RES_X * GlobalSettings::RES_Y * 4, NULL, GL_DYNAMIC_DRAW_ARB ); } void Clean_OpenGL_and_CUDA() { cudaGraphicsUnregisterResource( resource ); glBindBuffer( GL_PIXEL_UNPACK_BUFFER_ARB, 0 ); glDeleteBuffers( 1, &bufferObj ); }
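Each frame of the renderer above runs the same CUDA/OpenGL interop cycle: register the pixel-buffer object with CUDA, map it, fetch a device pointer, render into it, then unmap so OpenGL can draw it. A minimal sketch of that cycle, assuming a GL context and pixel-buffer object already exist; renderKernel is a hypothetical stand-in for the project's cudaRenderer() entry point:

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

__global__ void renderKernel(uchar4 *vfb, int w, int h)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < w && y < h)
        vfb[y * w + x] = make_uchar4((unsigned char) (x & 0xff), (unsigned char) (y & 0xff), 0, 255);
}

void renderOneFrame(GLuint pbo, int w, int h)
{
    cudaGraphicsResource *res = NULL;
    cudaGraphicsGLRegisterBuffer(&res, pbo, cudaGraphicsMapFlagsNone);   // share the PBO with CUDA
    cudaGraphicsMapResources(1, &res, 0);

    uchar4 *vfb = NULL;
    size_t bytes = 0;
    cudaGraphicsResourceGetMappedPointer((void **) &vfb, &bytes, res);   // device pointer into the PBO

    dim3 block(16, 16);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
    renderKernel<<<grid, block>>>(vfb, w, h);

    cudaGraphicsUnmapResources(1, &res, 0);        // hand the buffer back to OpenGL
    cudaGraphicsUnregisterResource(res);           // glDrawPixels(...) would follow on the GL side
}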
defecd7ed9a3a11b6689ee62159fefc5d52e1e23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Copyright-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "CascadedCompressionGPU.h" #include "BitPackGPU.h" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "Check.h" #include "CudaUtils.h" #include "DeltaGPU.h" #include "RunLengthEncodeGPU.h" #include "TempSpaceBroker.h" #include "nvcomp.h" #include "nvcomp.hpp" #include "type_macros.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iostream> #include <limits> #include <memory> #include <sstream> #include <stdexcept> #include <string> namespace nvcomp { namespace highlevel { /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ namespace { template <typename T> __global__ void dereferenceDevice(T* const outValue, T* const* const ref) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *outValue = **ref; } template <typename T> __global__ void configureBitPackHeader( CascadedMetadata::Header* const header, T** const minValueDevicePtr, unsigned char** const numBitsDevicePtr) { // setup the header and pointers into it assert(blockIdx.x == 0); assert(threadIdx.x == 0); *minValueDevicePtr = CascadedMetadata::getMinValueLocation<T>(header); *numBitsDevicePtr = &header->numBits; } /** * @brief Asynchronously perform a device to device copy, where the destination * address and number of elements to copy are stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block. * @param destDPtr The pointer to the destination address to copy elements to, * stored on the device. * @param src The source address to copy elements from. * @param numElementsDPtr The number of elements to copy, stored on the device. 
*/ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T** const destDPtr, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); T* const dest = *destDPtr; const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } /** * @brief Asynchronously perform a device to device copy, where the number of * elements to copy is stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block to use. * @param dest The destination address to copy to. * @param src The source address to copy from. * @param numElementsDPtr The number of elements to copy, stored on the device. */ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T* const dest, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } template <typename T> __global__ void offsetPointerAsync(T* const src, T** const dst, const size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *dst = src + *offset; } __global__ void offsetAndAlignPointerAsync( void* const src, void** const dst, size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); // update the offset if we need to const size_t unalignedOffset = *offset; const size_t alignedOffset = roundUpTo(unalignedOffset, sizeof(size_t)); if (alignedOffset != unalignedOffset) { *offset = alignedOffset; } *dst = static_cast<char*>(src) + alignedOffset; } template <typename VALUE, typename RUN> __global__ void configTempSpacePointers( VALUE* const vals, VALUE** const valsPtr, RUN* const runs, RUN** const runsPtr, VALUE* const delta, VALUE** const deltaPtr) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *valsPtr = vals; *runsPtr = runs; *deltaPtr = delta; } template <typename T> __global__ void increaseOffsetByBitPacking( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = roundUpTo( roundUpDiv(header->length * header->numBits, 8ULL), sizeof(T)); *offsetDevice += temp_size; } template <typename T> __global__ void increaseOffsetByRaw( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = header->length * sizeof(T); *offsetDevice += temp_size; } /** * @brief This kernel allows copying to the device from a stack variable * asynchronously. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. 
*/ template <typename T> __global__ void asyncPODCopyKernel(const T hostValue, T* const deviceValue) { static_assert(std::is_pod<T>::value, "Must be a POD to do async copy."); assert(threadIdx.x == 0); assert(blockIdx.x == 0); *deviceValue = hostValue; } } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { void checkAlignmentOf(void* const ptr, const size_t alignment) { void* aligned_ptr = ptr; size_t space = alignment; if (std::align(alignment, alignment, aligned_ptr, space) == nullptr || ptr != aligned_ptr) { std::ostringstream oss; oss << ptr; throw std::runtime_error( "Incorrectly aligned buffer: " + oss.str() + ", should be aligned to " + std::to_string(alignment)); } } /** * @brief This copies the input to the device from a stack variable * asynchronously. While this is inefficient, it is better than synchronizing or * pinning the variable. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. */ template <typename T> void asyncPODCopy(const T& value, T* const destination, hipStream_t stream) { hipLaunchKernelGGL(( asyncPODCopyKernel), dim3(dim3(1)), dim3(dim3(1)), 0, stream, value, destination); CudaUtils::check_last_error("Failed to launch asyncPODCopyKernel"); } /** * @brief Bit pack or copy the elements to an output address. * * @tparam T The type of element to pack/copy. * @param headerDPtr The header, stored on the device. * @param temp_ptr The temporary workspace allocated (on the device). * @param temp_bytes The size of the temporary workspace. * @param outputDPtr The pointer to the location to output the elements to (on * the device), stored on the device. * @param input The input elements (on the device). * @param numElementsDPtr The pointer to the number of elements, stored on the * device. * @param maxNum The maximum number of elements. * @param offsetDPtr The current offset output, to be increased by * the number of bytes written by this function. * @param bitPacking Whether or not to perform bitpacking on this data. * @param stream The stream to asynchronously perform work on. 
*/ template <typename T> void packToOutput( CascadedMetadata::Header* const headerDPtr, void* const temp_ptr, const size_t temp_bytes, void** const outputDPtr, const T* const input, const size_t* const numElementsDPtr, const size_t maxNum, size_t* const offsetDPtr, const bool bitPacking, hipStream_t stream) { CudaUtils::copy_async( &(headerDPtr->length), numElementsDPtr, 1, DEVICE_TO_DEVICE, stream); if (bitPacking) { TempSpaceBroker tempSpace(temp_ptr, temp_bytes); void** bitPackOutputPtr; void** minValueDevicePtr; unsigned char** numBitsDevicePtr; tempSpace.reserve(&bitPackOutputPtr, 1); tempSpace.reserve(&minValueDevicePtr, 1); tempSpace.reserve(&numBitsDevicePtr, 1); hipLaunchKernelGGL(( configureBitPackHeader), dim3(1), dim3(1), 0, stream, headerDPtr, reinterpret_cast<T**>(minValueDevicePtr), numBitsDevicePtr); CudaUtils::check_last_error("Failed to launch configureBitPackHeader"); void* const packTemp = reinterpret_cast<void*>(numBitsDevicePtr + 1); const size_t packTempSize = temp_bytes - (static_cast<char*>(packTemp) - static_cast<char*>(temp_ptr)); BitPackGPU::compress( packTemp, packTempSize, TypeOf<T>(), outputDPtr, input, numElementsDPtr, maxNum, minValueDevicePtr, numBitsDevicePtr, stream); hipLaunchKernelGGL(( increaseOffsetByBitPacking<T>), dim3(1), dim3(1), 0, stream, offsetDPtr, headerDPtr); CudaUtils::check_last_error("Failed to launch increaseOffsetByBitPacking"); } else { constexpr const int BLOCK_SIZE = 512; const dim3 grid(::min(1024, roundUpDiv<int, int>(maxNum, BLOCK_SIZE))); const dim3 block(BLOCK_SIZE); hipLaunchKernelGGL(( deferredCopy<T, BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, reinterpret_cast<T**>(outputDPtr), input, numElementsDPtr); CudaUtils::check_last_error("Failed to launch deferredCopy"); hipLaunchKernelGGL(( increaseOffsetByRaw<T>), dim3(1), dim3(1), 0, stream, offsetDPtr, headerDPtr); CudaUtils::check_last_error("Failed to launch increaseOffsetByRaw"); } } template <typename valT, typename runT> void generateTypedOutputUpperBound( const size_t in_bytes, const nvcompCascadedFormatOpts* const opts, size_t* const out_bytes) { CascadedMetadata metadata(*opts, TypeOf<valT>(), in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t outputSize = in_bytes / sizeof(valT); assert(outputSize * sizeof(valT) == in_bytes); int vals_id = 0; // initialize config nvcompType_t type = TypeOf<valT>(); nvcompIntConfig_t* config = createConfig(&metadata); // First past - set layers assume nothing actual compresses. // TODO: This will be a // gross over estimation of the output size, but the better option would // be to probably just assume 1:1 output/input, and error out during // compression if we fail to achieve that (maybe just set RLE, Delta, and BP // to 0, and do a memcpy, so that user's wont have to handle the error case // in their code). 
// A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = ::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; nvcompConfigAddRLE_BP( config, inputId, outputSize, valId, type, bitPacking, runId, type, bitPacking); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, valId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, valId, type, 0); // no bitpacking when delta is used as an intermediate step } } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, inputId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, inputId, type, 0); // no bitpacking when delta is used as an intermediate step } } } destroyConfig(config); // we will abort compression if we can't fit into out_bytes. const size_t serializedMetadataSize = CascadedMetadataOnGPU::getSerializedSizeOf(metadata); // This may be overkill, as most datatypes we use are aligned to size_t, // which on x86_64 is 8 bytes, where as this will be 16 bytes. In theory a // smart compiler could potentially generate instructions for some of our // structure that at 16-byte aligned. const size_t wordSize = alignof(std::max_align_t); // space for metadata, each set of 'runs', one set of 'vals'. *out_bytes = roundUpTo(serializedMetadataSize, wordSize) + roundUpTo(sizeof(runT) * outputSize, wordSize) * numRLEs + roundUpTo(sizeof(valT) * outputSize, wordSize); } template <typename valT, typename runT> void compressTypedAsync( const void* const in_ptr, const size_t in_bytes, const nvcompCascadedFormatOpts* const format_opts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, hipStream_t stream) { const nvcompType_t type = TypeOf<valT>(); CascadedMetadata metadata(*format_opts, type, in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t maxNum = in_bytes / sizeof(valT); int vals_id = 0; TempSpaceBroker tempSpace(temp_ptr, temp_bytes); size_t* offsetDevice; tempSpace.reserve(&offsetDevice, 1); CascadedMetadataOnGPU metadataOnGPU( out_ptr, CascadedMetadataOnGPU::getSerializedSizeOf(metadata)); metadataOnGPU.copyToGPU(metadata, offsetDevice, stream); valT* vals_delta = nullptr; valT* vals_output = nullptr; runT* runs_output = nullptr; if (numRLEs > 0 || numDeltas > 0) { tempSpace.reserve(&vals_output, maxNum); if (numRLEs > 0) { tempSpace.reserve(&runs_output, maxNum); } tempSpace.reserve(&vals_delta, maxNum); } size_t* numRunsDevice; size_t* outputSizePtr; tempSpace.reserve(&numRunsDevice, 1); tempSpace.reserve(&outputSizePtr, 1); runT** runs_output_ptr; valT** vals_output_ptr; valT** vals_delta_ptr; tempSpace.reserve(&runs_output_ptr, 1); tempSpace.reserve(&vals_output_ptr, 1); tempSpace.reserve(&vals_delta_ptr, 1); void** bit_out_ptr; tempSpace.reserve(&bit_out_ptr, 1); hipError_t* statusDevice; tempSpace.reserve(&statusDevice, 1); hipLaunchKernelGGL(( configTempSpacePointers), dim3(1), dim3(1), 0, stream, vals_output, vals_output_ptr, runs_output, runs_output_ptr, vals_delta, 
vals_delta_ptr); CudaUtils::check_last_error("Failed to launch configTempSpacePointers"); // Set first offset to end of metadata metadataOnGPU.saveOffset(vals_id, offsetDevice, stream); // Second pass - perform compression and store in the memory allocated above. // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = ::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { int nextValId; const bool firstLayer = r == ::max(numRLEs - 1, numDeltas - 1); const valT* const vals_input = firstLayer ? static_cast<const valT*>(in_ptr) : vals_delta; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // rle always first if (firstLayer) { RunLengthEncodeGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), vals_output, TypeOf<runT>(), runs_output, numRunsDevice, vals_input, maxNum, stream); } else { RunLengthEncodeGPU::compressDownstream( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), (void**)vals_output_ptr, TypeOf<runT>(), (void**)runs_output_ptr, numRunsDevice, vals_input, outputSizePtr, maxNum, stream); } // save initial offset CascadedMetadata::Header* const valHdr = metadataOnGPU.getHeaderLocation(valId); CudaUtils::copy_async( &(valHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(valId, offsetDevice, stream); CascadedMetadata::Header* const runHdr = metadataOnGPU.getHeaderLocation(runId); CudaUtils::copy_async( &(runHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), (void**)vals_delta_ptr, vals_output, numRunsDevice, maxNum, stream); const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } else { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); hipLaunchKernelGGL(( deferredCopy<valT, COPY_BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, vals_delta, vals_output, numRunsDevice); CudaUtils::check_last_error("Failed to launch deferredCopy"); nextValId = valId; } hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream, out_ptr, bit_out_ptr, offsetDevice); CudaUtils::check_last_error("Failed to launch " "offsetAndAlignPointerAsync"); metadataOnGPU.saveOffset(runId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(runId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, runs_output, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { if (!firstLayer) { CudaUtils::copy_async( numRunsDevice, outputSizePtr, 1, DEVICE_TO_DEVICE, stream); } else { CudaUtils::copy_async( numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); } // No RLE DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), (void**)vals_output_ptr, vals_input, numRunsDevice, maxNum, stream); // we need to copy the delta to final delta buffer { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); hipLaunchKernelGGL(( deferredCopy<valT, COPY_BLOCK_SIZE>), dim3(grid), dim3(block), 
0, stream, vals_delta, vals_output, numRunsDevice); CudaUtils::check_last_error("Failed to launch deferredCopy"); } const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } if (r == 0) { hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream, out_ptr, bit_out_ptr, offsetDevice); CudaUtils::check_last_error("Failed to launch " "offsetAndAlignPointerAsync"); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_delta, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { // update current RLE size CudaUtils::copy_async( outputSizePtr, numRunsDevice, 1, DEVICE_TO_DEVICE, stream); } } // If there are no RLEs or Deltas, we will do a single BP step. if (numRLEs == 0 && numDeltas == 0) { const int nextValId = ++vals_id; const valT* const vals_input = static_cast<const valT*>(in_ptr); CudaUtils::copy_async(numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); hipLaunchKernelGGL(( offsetAndAlignPointerAsync), dim3(1), dim3(1), 0, stream, out_ptr, bit_out_ptr, offsetDevice); CudaUtils::check_last_error("Failed to launch offsetAndAlignPointerAsync"); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_input, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } // async copy output metadataOnGPU.setCompressedSizeFromGPU(offsetDevice, stream); if (CudaUtils::is_device_pointer(out_bytes)) { CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_DEVICE, stream); } else { CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_HOST, stream); } } } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void nvcompCascadedCompressionGPU::computeWorkspaceSize( const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, size_t* const temp_bytes) { size_t kernelBytes = 0; // get at least enough for intermediate gpu values size_t ioBytes = 1024; const size_t numIn = in_bytes / sizeOfnvcompType(in_type); const nvcompType_t runType = selectRunsType(numIn); if (opts->use_bp) { // max of runs and values kernelBytes = ::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, in_type)); kernelBytes = ::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, runType)); } if (opts->num_deltas > 0) { kernelBytes = ::max( kernelBytes, DeltaGPU::requiredWorkspaceSize(numIn, in_type)); } if (opts->num_RLEs > 0) { kernelBytes = ::max( kernelBytes, RunLengthEncodeGPU::requiredWorkspaceSize(numIn, in_type, runType)); ioBytes += (2 * in_bytes) + numIn * sizeOfnvcompType(runType); } else if (opts->num_deltas > 0) { ioBytes += 2 * in_bytes; } *temp_bytes = kernelBytes + ioBytes; } void nvcompCascadedCompressionGPU::generateOutputUpperBound( const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, size_t* const out_bytes) { CHECK_NOT_NULL(opts); CHECK_NOT_NULL(out_bytes); const nvcompType_t countType = selectRunsType(in_bytes / 
sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, generateTypedOutputUpperBound, in_bytes, opts, out_bytes); } void nvcompCascadedCompressionGPU::compressAsync( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const cascadedOpts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, hipStream_t stream) { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(cascadedOpts); CHECK_NOT_NULL(temp_ptr); CHECK_NOT_NULL(out_ptr); CHECK_NOT_NULL(out_bytes); checkAlignmentOf(out_ptr, sizeof(size_t)); checkAlignmentOf(temp_ptr, sizeof(size_t)); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, compressTypedAsync, in_ptr, in_bytes, cascadedOpts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } } // namespace highlevel } // namespace nvcomp
defecd7ed9a3a11b6689ee62159fefc5d52e1e23.cu
/* * Copyright (c) Copyright-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "CascadedCompressionGPU.h" #include "BitPackGPU.h" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "Check.h" #include "CudaUtils.h" #include "DeltaGPU.h" #include "RunLengthEncodeGPU.h" #include "TempSpaceBroker.h" #include "nvcomp.h" #include "nvcomp.hpp" #include "type_macros.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iostream> #include <limits> #include <memory> #include <sstream> #include <stdexcept> #include <string> namespace nvcomp { namespace highlevel { /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ namespace { template <typename T> __global__ void dereferenceDevice(T* const outValue, T* const* const ref) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *outValue = **ref; } template <typename T> __global__ void configureBitPackHeader( CascadedMetadata::Header* const header, T** const minValueDevicePtr, unsigned char** const numBitsDevicePtr) { // setup the header and pointers into it assert(blockIdx.x == 0); assert(threadIdx.x == 0); *minValueDevicePtr = CascadedMetadata::getMinValueLocation<T>(header); *numBitsDevicePtr = &header->numBits; } /** * @brief Asynchronously perform a device to device copy, where the destination * address and number of elements to copy are stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block. * @param destDPtr The pointer to the destination address to copy elements to, * stored on the device. * @param src The source address to copy elements from. * @param numElementsDPtr The number of elements to copy, stored on the device. 
*/ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T** const destDPtr, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); T* const dest = *destDPtr; const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } /** * @brief Asynchronously perform a device to device copy, where the number of * elements to copy is stored on the device. * * @tparam T The type of element to copy. * @tparam BLOCK_SIZE The size of each thread block to use. * @param dest The destination address to copy to. * @param src The source address to copy from. * @param numElementsDPtr The number of elements to copy, stored on the device. */ template <typename T, int BLOCK_SIZE> __global__ void deferredCopy( T* const dest, const T* const src, const size_t* const numElementsDPtr) { assert(blockDim.x == BLOCK_SIZE); const size_t num = *numElementsDPtr; for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < num; idx += gridDim.x * BLOCK_SIZE) { dest[idx] = src[idx]; } } template <typename T> __global__ void offsetPointerAsync(T* const src, T** const dst, const size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *dst = src + *offset; } __global__ void offsetAndAlignPointerAsync( void* const src, void** const dst, size_t* const offset) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); // update the offset if we need to const size_t unalignedOffset = *offset; const size_t alignedOffset = roundUpTo(unalignedOffset, sizeof(size_t)); if (alignedOffset != unalignedOffset) { *offset = alignedOffset; } *dst = static_cast<char*>(src) + alignedOffset; } template <typename VALUE, typename RUN> __global__ void configTempSpacePointers( VALUE* const vals, VALUE** const valsPtr, RUN* const runs, RUN** const runsPtr, VALUE* const delta, VALUE** const deltaPtr) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); *valsPtr = vals; *runsPtr = runs; *deltaPtr = delta; } template <typename T> __global__ void increaseOffsetByBitPacking( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = roundUpTo( roundUpDiv(header->length * header->numBits, 8ULL), sizeof(T)); *offsetDevice += temp_size; } template <typename T> __global__ void increaseOffsetByRaw( size_t* const offsetDevice, const CascadedMetadata::Header* const header) { assert(threadIdx.x == 0); assert(blockIdx.x == 0); const size_t temp_size = header->length * sizeof(T); *offsetDevice += temp_size; } /** * @brief This kernel allows copying to the device from a stack variable * asynchronously. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. 
*/ template <typename T> __global__ void asyncPODCopyKernel(const T hostValue, T* const deviceValue) { static_assert(std::is_pod<T>::value, "Must be a POD to do async copy."); assert(threadIdx.x == 0); assert(blockIdx.x == 0); *deviceValue = hostValue; } } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { void checkAlignmentOf(void* const ptr, const size_t alignment) { void* aligned_ptr = ptr; size_t space = alignment; if (std::align(alignment, alignment, aligned_ptr, space) == nullptr || ptr != aligned_ptr) { std::ostringstream oss; oss << ptr; throw std::runtime_error( "Incorrectly aligned buffer: " + oss.str() + ", should be aligned to " + std::to_string(alignment)); } } /** * @brief This copies the input to the device from a stack variable * asynchronously. While this is inefficient, it is better than synchronizing or * pinning the variable. * * @tparam T The type of variable to copy. * @param hostValue The value to copy. * @param deviceValue The location to copy to. */ template <typename T> void asyncPODCopy(const T& value, T* const destination, cudaStream_t stream) { asyncPODCopyKernel<<<dim3(1), dim3(1), 0, stream>>>(value, destination); CudaUtils::check_last_error("Failed to launch asyncPODCopyKernel"); } /** * @brief Bit pack or copy the elements to an output address. * * @tparam T The type of element to pack/copy. * @param headerDPtr The header, stored on the device. * @param temp_ptr The temporary workspace allocated (on the device). * @param temp_bytes The size of the temporary workspace. * @param outputDPtr The pointer to the location to output the elements to (on * the device), stored on the device. * @param input The input elements (on the device). * @param numElementsDPtr The pointer to the number of elements, stored on the * device. * @param maxNum The maximum number of elements. * @param offsetDPtr The current offset output, to be increased by * the number of bytes written by this function. * @param bitPacking Whether or not to perform bitpacking on this data. * @param stream The stream to asynchronously perform work on. 
*/ template <typename T> void packToOutput( CascadedMetadata::Header* const headerDPtr, void* const temp_ptr, const size_t temp_bytes, void** const outputDPtr, const T* const input, const size_t* const numElementsDPtr, const size_t maxNum, size_t* const offsetDPtr, const bool bitPacking, cudaStream_t stream) { CudaUtils::copy_async( &(headerDPtr->length), numElementsDPtr, 1, DEVICE_TO_DEVICE, stream); if (bitPacking) { TempSpaceBroker tempSpace(temp_ptr, temp_bytes); void** bitPackOutputPtr; void** minValueDevicePtr; unsigned char** numBitsDevicePtr; tempSpace.reserve(&bitPackOutputPtr, 1); tempSpace.reserve(&minValueDevicePtr, 1); tempSpace.reserve(&numBitsDevicePtr, 1); configureBitPackHeader<<<1, 1, 0, stream>>>( headerDPtr, reinterpret_cast<T**>(minValueDevicePtr), numBitsDevicePtr); CudaUtils::check_last_error("Failed to launch configureBitPackHeader"); void* const packTemp = reinterpret_cast<void*>(numBitsDevicePtr + 1); const size_t packTempSize = temp_bytes - (static_cast<char*>(packTemp) - static_cast<char*>(temp_ptr)); BitPackGPU::compress( packTemp, packTempSize, TypeOf<T>(), outputDPtr, input, numElementsDPtr, maxNum, minValueDevicePtr, numBitsDevicePtr, stream); increaseOffsetByBitPacking<T><<<1, 1, 0, stream>>>(offsetDPtr, headerDPtr); CudaUtils::check_last_error("Failed to launch increaseOffsetByBitPacking"); } else { constexpr const int BLOCK_SIZE = 512; const dim3 grid(std::min(1024, roundUpDiv<int, int>(maxNum, BLOCK_SIZE))); const dim3 block(BLOCK_SIZE); deferredCopy<T, BLOCK_SIZE><<<grid, block, 0, stream>>>( reinterpret_cast<T**>(outputDPtr), input, numElementsDPtr); CudaUtils::check_last_error("Failed to launch deferredCopy"); increaseOffsetByRaw<T><<<1, 1, 0, stream>>>(offsetDPtr, headerDPtr); CudaUtils::check_last_error("Failed to launch increaseOffsetByRaw"); } } template <typename valT, typename runT> void generateTypedOutputUpperBound( const size_t in_bytes, const nvcompCascadedFormatOpts* const opts, size_t* const out_bytes) { CascadedMetadata metadata(*opts, TypeOf<valT>(), in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t outputSize = in_bytes / sizeof(valT); assert(outputSize * sizeof(valT) == in_bytes); int vals_id = 0; // initialize config nvcompType_t type = TypeOf<valT>(); nvcompIntConfig_t* config = createConfig(&metadata); // First past - set layers assume nothing actual compresses. // TODO: This will be a // gross over estimation of the output size, but the better option would // be to probably just assume 1:1 output/input, and error out during // compression if we fail to achieve that (maybe just set RLE, Delta, and BP // to 0, and do a memcpy, so that user's wont have to handle the error case // in their code). 
// A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = std::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; nvcompConfigAddRLE_BP( config, inputId, outputSize, valId, type, bitPacking, runId, type, bitPacking); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, valId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, valId, type, 0); // no bitpacking when delta is used as an intermediate step } } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config, inputId, outputSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config, deltaId, outputSize, inputId, type, 0); // no bitpacking when delta is used as an intermediate step } } } destroyConfig(config); // we will abort compression if we can't fit into out_bytes. const size_t serializedMetadataSize = CascadedMetadataOnGPU::getSerializedSizeOf(metadata); // This may be overkill, as most datatypes we use are aligned to size_t, // which on x86_64 is 8 bytes, where as this will be 16 bytes. In theory a // smart compiler could potentially generate instructions for some of our // structure that at 16-byte aligned. const size_t wordSize = alignof(std::max_align_t); // space for metadata, each set of 'runs', one set of 'vals'. *out_bytes = roundUpTo(serializedMetadataSize, wordSize) + roundUpTo(sizeof(runT) * outputSize, wordSize) * numRLEs + roundUpTo(sizeof(valT) * outputSize, wordSize); } template <typename valT, typename runT> void compressTypedAsync( const void* const in_ptr, const size_t in_bytes, const nvcompCascadedFormatOpts* const format_opts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, cudaStream_t stream) { const nvcompType_t type = TypeOf<valT>(); CascadedMetadata metadata(*format_opts, type, in_bytes, 0); const int numRLEs = metadata.getNumRLEs(); const int numDeltas = metadata.getNumDeltas(); const bool bitPacking = metadata.useBitPacking(); // assume single chunk for now // TODO: implement a multi-chunk version const size_t maxNum = in_bytes / sizeof(valT); int vals_id = 0; TempSpaceBroker tempSpace(temp_ptr, temp_bytes); size_t* offsetDevice; tempSpace.reserve(&offsetDevice, 1); CascadedMetadataOnGPU metadataOnGPU( out_ptr, CascadedMetadataOnGPU::getSerializedSizeOf(metadata)); metadataOnGPU.copyToGPU(metadata, offsetDevice, stream); valT* vals_delta = nullptr; valT* vals_output = nullptr; runT* runs_output = nullptr; if (numRLEs > 0 || numDeltas > 0) { tempSpace.reserve(&vals_output, maxNum); if (numRLEs > 0) { tempSpace.reserve(&runs_output, maxNum); } tempSpace.reserve(&vals_delta, maxNum); } size_t* numRunsDevice; size_t* outputSizePtr; tempSpace.reserve(&numRunsDevice, 1); tempSpace.reserve(&outputSizePtr, 1); runT** runs_output_ptr; valT** vals_output_ptr; valT** vals_delta_ptr; tempSpace.reserve(&runs_output_ptr, 1); tempSpace.reserve(&vals_output_ptr, 1); tempSpace.reserve(&vals_delta_ptr, 1); void** bit_out_ptr; tempSpace.reserve(&bit_out_ptr, 1); cudaError_t* statusDevice; tempSpace.reserve(&statusDevice, 1); configTempSpacePointers<<<1, 1, 0, stream>>>( vals_output, vals_output_ptr, runs_output, runs_output_ptr, vals_delta, vals_delta_ptr); 
CudaUtils::check_last_error("Failed to launch configTempSpacePointers"); // Set first offset to end of metadata metadataOnGPU.saveOffset(vals_id, offsetDevice, stream); // Second pass - perform compression and store in the memory allocated above. // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = std::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { int nextValId; const bool firstLayer = r == std::max(numRLEs - 1, numDeltas - 1); const valT* const vals_input = firstLayer ? static_cast<const valT*>(in_ptr) : vals_delta; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // rle always first if (firstLayer) { RunLengthEncodeGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), vals_output, TypeOf<runT>(), runs_output, numRunsDevice, vals_input, maxNum, stream); } else { RunLengthEncodeGPU::compressDownstream( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), (void**)vals_output_ptr, TypeOf<runT>(), (void**)runs_output_ptr, numRunsDevice, vals_input, outputSizePtr, maxNum, stream); } // save initial offset CascadedMetadata::Header* const valHdr = metadataOnGPU.getHeaderLocation(valId); CudaUtils::copy_async( &(valHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(valId, offsetDevice, stream); CascadedMetadata::Header* const runHdr = metadataOnGPU.getHeaderLocation(runId); CudaUtils::copy_async( &(runHdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), (void**)vals_delta_ptr, vals_output, numRunsDevice, maxNum, stream); const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } else { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(std::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); deferredCopy<valT, COPY_BLOCK_SIZE><<<grid, block, 0, stream>>>( vals_delta, vals_output, numRunsDevice); CudaUtils::check_last_error("Failed to launch deferredCopy"); nextValId = valId; } offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>( out_ptr, bit_out_ptr, offsetDevice); CudaUtils::check_last_error("Failed to launch " "offsetAndAlignPointerAsync"); metadataOnGPU.saveOffset(runId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(runId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, runs_output, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { if (!firstLayer) { CudaUtils::copy_async( numRunsDevice, outputSizePtr, 1, DEVICE_TO_DEVICE, stream); } else { CudaUtils::copy_async( numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); } // No RLE DeltaGPU::compress( tempSpace.next(), tempSpace.spaceLeft(), TypeOf<valT>(), (void**)vals_output_ptr, vals_input, numRunsDevice, maxNum, stream); // we need to copy the delta to final delta buffer { constexpr const int COPY_BLOCK_SIZE = 512; const dim3 grid(std::min( 4096, static_cast<int>(roundUpDiv(maxNum, COPY_BLOCK_SIZE)))); const dim3 block(COPY_BLOCK_SIZE); deferredCopy<valT, COPY_BLOCK_SIZE><<<grid, block, 0, stream>>>( vals_delta, vals_output, numRunsDevice); CudaUtils::check_last_error("Failed to 
launch deferredCopy"); } const int id = ++vals_id; nextValId = id; CascadedMetadata::Header* const hdr = metadataOnGPU.getHeaderLocation(id); CudaUtils::copy_async( &(hdr->length), numRunsDevice, 1, DEVICE_TO_DEVICE, stream); metadataOnGPU.saveOffset(id, offsetDevice, stream); } if (r == 0) { offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>( out_ptr, bit_out_ptr, offsetDevice); CudaUtils::check_last_error("Failed to launch " "offsetAndAlignPointerAsync"); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_delta, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } else { // update current RLE size CudaUtils::copy_async( outputSizePtr, numRunsDevice, 1, DEVICE_TO_DEVICE, stream); } } // If there are no RLEs or Deltas, we will do a single BP step. if (numRLEs == 0 && numDeltas == 0) { const int nextValId = ++vals_id; const valT* const vals_input = static_cast<const valT*>(in_ptr); CudaUtils::copy_async(numRunsDevice, &maxNum, 1, HOST_TO_DEVICE, stream); offsetAndAlignPointerAsync<<<1, 1, 0, stream>>>( out_ptr, bit_out_ptr, offsetDevice); CudaUtils::check_last_error("Failed to launch offsetAndAlignPointerAsync"); metadataOnGPU.saveOffset(nextValId, offsetDevice, stream); // pack runs into bytes packToOutput( metadataOnGPU.getHeaderLocation(nextValId), tempSpace.next(), tempSpace.spaceLeft(), bit_out_ptr, vals_input, numRunsDevice, maxNum, offsetDevice, bitPacking, stream); } // async copy output metadataOnGPU.setCompressedSizeFromGPU(offsetDevice, stream); if (CudaUtils::is_device_pointer(out_bytes)) { CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_DEVICE, stream); } else { CudaUtils::copy_async(out_bytes, offsetDevice, 1, DEVICE_TO_HOST, stream); } } } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void nvcompCascadedCompressionGPU::computeWorkspaceSize( const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, size_t* const temp_bytes) { size_t kernelBytes = 0; // get at least enough for intermediate gpu values size_t ioBytes = 1024; const size_t numIn = in_bytes / sizeOfnvcompType(in_type); const nvcompType_t runType = selectRunsType(numIn); if (opts->use_bp) { // max of runs and values kernelBytes = std::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, in_type)); kernelBytes = std::max( kernelBytes, BitPackGPU::requiredWorkspaceSize(numIn, runType)); } if (opts->num_deltas > 0) { kernelBytes = std::max( kernelBytes, DeltaGPU::requiredWorkspaceSize(numIn, in_type)); } if (opts->num_RLEs > 0) { kernelBytes = std::max( kernelBytes, RunLengthEncodeGPU::requiredWorkspaceSize(numIn, in_type, runType)); ioBytes += (2 * in_bytes) + numIn * sizeOfnvcompType(runType); } else if (opts->num_deltas > 0) { ioBytes += 2 * in_bytes; } *temp_bytes = kernelBytes + ioBytes; } void nvcompCascadedCompressionGPU::generateOutputUpperBound( const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const opts, size_t* const out_bytes) { CHECK_NOT_NULL(opts); CHECK_NOT_NULL(out_bytes); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, generateTypedOutputUpperBound, in_bytes, opts, out_bytes); } void 
nvcompCascadedCompressionGPU::compressAsync( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const cascadedOpts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, cudaStream_t stream) { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(cascadedOpts); CHECK_NOT_NULL(temp_ptr); CHECK_NOT_NULL(out_ptr); CHECK_NOT_NULL(out_bytes); checkAlignmentOf(out_ptr, sizeof(size_t)); checkAlignmentOf(temp_ptr, sizeof(size_t)); const nvcompType_t countType = selectRunsType(in_bytes / sizeOfnvcompType(in_type)); NVCOMP_TYPE_TWO_SWITCH( in_type, countType, compressTypedAsync, in_ptr, in_bytes, cascadedOpts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } } // namespace highlevel } // namespace nvcomp
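// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file). It shows how the
// three public entry points defined above are typically chained: query the
// workspace size and the worst-case output size, allocate device buffers, and
// launch the asynchronous compression. The buffer names (d_in, d_temp, d_out),
// the element type NVCOMP_TYPE_INT, the concrete format options and the
// pre-existing cudaStream_t `stream` are assumptions made for this example
// only; error checking is omitted.
//
//   nvcompCascadedFormatOpts opts;
//   opts.num_RLEs   = 1;   // one run-length-encoding layer
//   opts.num_deltas = 1;   // one delta layer
//   opts.use_bp     = 1;   // bit-pack the final outputs
//
//   size_t temp_bytes = 0, max_out_bytes = 0;
//   nvcompCascadedCompressionGPU::computeWorkspaceSize(
//       in_bytes, NVCOMP_TYPE_INT, &opts, &temp_bytes);
//   nvcompCascadedCompressionGPU::generateOutputUpperBound(
//       in_bytes, NVCOMP_TYPE_INT, &opts, &max_out_bytes);
//
//   void* d_temp = nullptr;
//   void* d_out  = nullptr;
//   cudaMalloc(&d_temp, temp_bytes);
//   cudaMalloc(&d_out, max_out_bytes);
//
//   size_t out_bytes = 0;   // written asynchronously on `stream`
//   nvcompCascadedCompressionGPU::compressAsync(
//       d_in, in_bytes, NVCOMP_TYPE_INT, &opts,
//       d_temp, temp_bytes, d_out, &out_bytes, stream);
//   cudaStreamSynchronize(stream);   // out_bytes is valid after this point
// --------------------------------------------------------------------------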
31b504f38a3860c65c18b0847946810e1f272d99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip_runtime.h" /// /// @file fdwt97.cu /// @brief CUDA implementation of forward 9/7 2D DWT. /// @author Martin Jirman ([email protected]) /// @date 2011-01-20 13:18 /// /// /// Copyright (c) 2011 Martin Jirman /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE /// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE /// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR /// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF /// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN /// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) /// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. /// #include "common.h" #include "transform_buffer.h" #include "io.h" namespace dwt_cuda { /// Wraps a buffer and methods for computing 9/7 FDWT with sliding window /// of specified size. Template arguments specify this size. /// @tparam WIN_SIZE_X width of sliding window /// @tparam WIN_SIZE_Y height of sliding window template <int WIN_SIZE_X, int WIN_SIZE_Y> class FDWT97 { private: /// Type of shared memory buffer used for 9/7 DWT. typedef TransformBuffer<float, WIN_SIZE_X, WIN_SIZE_Y + 7, 4> FDWT97Buffer; /// Actual shared buffer used for forward 9/7 DWT. FDWT97Buffer buffer; /// Difference of indices of two vertically neighboring items in buffer. enum { STRIDE = FDWT97Buffer::VERTICAL_STRIDE }; /// One thread's info about loading input image /// @tparam CHECKED true if loader should check for image boundaries template <bool CHECKED> struct FDWT97ColumnLoadingInfo { /// Loader of pixels from some input image. VerticalDWTPixelLoader<float, CHECKED> loader; /// Offset of column loaded by loader. (Offset in shared buffer.) int offset; }; /// Horizontal 9/7 FDWT on specified lines of transform buffer. /// @param lines number of lines to be transformed /// @param firstLine index of the first line to be transformed __device__ void horizontalFDWT97(const int lines, const int firstLine) { __syncthreads(); buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict1)); __syncthreads(); buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update1)); __syncthreads(); buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict2)); __syncthreads(); buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update2)); __syncthreads(); buffer.scaleHorizontal(scale97Div, scale97Mul, firstLine, lines); __syncthreads(); } /// Initializes one column of shared transform buffer with 7 input pixels. /// Those 7 pixels will not be transformed. 
Also initializes given loader. /// @tparam CHECKED true if loader should check for image boundaries /// @param column (uninitialized) object for loading input pixels /// @param columnIndex index (not offset!) of the column to be loaded /// (relative to threadblock's first column) /// @param input pointer to input image in GPU memory /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param firstY index of first row to be loaded from image template <bool CHECKED> __device__ void initColumn(FDWT97ColumnLoadingInfo<CHECKED> & column, const int columnIndex, const float * const input, const int sizeX, const int sizeY, const int firstY) { // get offset of the column with index 'columnIndex' column.offset = buffer.getColumnOffset(columnIndex); // x-coordinate of the first pixel to be loaded by given loader const int firstX = hipBlockIdx_x * WIN_SIZE_X + columnIndex; if(hipBlockIdx_y == 0) { // topmost block - apply mirroring rules when loading first 7 rows column.loader.init(sizeX, sizeY, firstX, firstY); // load pixels in mirrored way buffer[column.offset + 4 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 3 * STRIDE] = buffer[column.offset + 5 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 2 * STRIDE] = buffer[column.offset + 6 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 1 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 0 * STRIDE] = column.loader.loadFrom(input); // reinitialize loader to start with pixel #3 again column.loader.init(sizeX, sizeY, firstX, firstY + 3); } else { // non-topmost row - regular loading: column.loader.init(sizeX, sizeY, firstX, firstY - 4); // load 7 rows into the transform buffer for(int i = 0; i < 7; i++) { buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); } } // Now, the next pixel, which will be loaded by loader, is pixel #3. } /// Loads another WIN_SIZE_Y pixels into given column using given loader. /// @tparam CHECKED true if loader should check for image boundaries /// @param input input image to load from /// @param column loader and offset of loaded column in shared buffer template <bool CHECKED> inline __device__ void loadWindowIntoColumn(const float * const input, FDWT97ColumnLoadingInfo<CHECKED> & column) { for(int i = 7; i < (7 + WIN_SIZE_Y); i++) { buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); } } /// Main GPU 9/7 FDWT entry point. /// @tparam CHECK_LOADS true if boundaries should be checked when loading /// @tparam CHECK_WRITES true if boundaries should be checked when writing /// @param in input image /// @param out output buffer /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param winSteps number of steps of sliding window template <bool CHECK_LOADS, bool CHECK_WRITES> __device__ void transform(const float * const in, float * const out, const int sizeX, const int sizeY, const int winSteps) { // info about columns loaded by this thread: one main column and possibly // one boundary column. (Only some threads load some boundary column.) FDWT97ColumnLoadingInfo<CHECK_LOADS> loadedColumn; FDWT97ColumnLoadingInfo<CHECK_LOADS> boundaryColumn; // Initialize first 7 lines of transform buffer. const int firstY = hipBlockIdx_y * WIN_SIZE_Y * winSteps; initColumn(loadedColumn, hipThreadIdx_x, in, sizeX, sizeY, firstY); // Some threads initialize boundary columns. 
boundaryColumn.offset = 0; boundaryColumn.loader.clear(); if(hipThreadIdx_x < 7) { // each thread among first 7 ones gets index of one of boundary columns const int colId = hipThreadIdx_x + ((hipThreadIdx_x < 3) ? WIN_SIZE_X : -7); // Thread initializes offset of the boundary column (in shared buffer), // first 7 pixels of the column and a loader for this column. initColumn(boundaryColumn, colId, in, sizeX, sizeY, firstY); } // horizontally transform first 7 rows in all columns horizontalFDWT97(7, 0); // Index of column handled by this thread. (First half of threads handle // even columns and others handle odd columns.) const int outColumnIndex = parityIdx<WIN_SIZE_X>(); // writer of output linear bands - initialize it const int firstX = hipBlockIdx_x * WIN_SIZE_X + outColumnIndex; VerticalDWTBandWriter<float, CHECK_WRITES> writer; writer.init(sizeX, sizeY, firstX, firstY); // transform buffer offset of column transformed and saved by this thread const int outColumnOffset = buffer.getColumnOffset(outColumnIndex); // (Each iteration of this loop assumes that first 7 rows of transform // buffer are already loaded with horizontally transformed coefficients.) for(int w = 0; w < winSteps; w++) { // Load another WIN_SIZE_Y lines of thread's column into the buffer. loadWindowIntoColumn(in, loadedColumn); // some threads also load boundary columns if(hipThreadIdx_x < 7) { loadWindowIntoColumn(in, boundaryColumn); } // horizontally transform all newly loaded lines horizontalFDWT97(WIN_SIZE_Y, 7); // Using 7 registers, remember current values of last 7 rows of // transform buffer. These rows are transformed horizontally only // and will be used in next iteration. float last7Lines[7]; for(int i = 0; i < 7; i++) { last7Lines[i] = buffer[outColumnOffset + (WIN_SIZE_Y + i) * STRIDE]; } // vertically transform all central columns (do not scale yet) buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict1)); buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update1)); buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict2)); buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update2)); // Save all results of current window. Results are in transform buffer // at rows from #4 to #(4 + WIN_SIZE_Y). Other rows are invalid now. // (They only served as a boundary for vertical FDWT.) for(int i = 4; i < (4 + WIN_SIZE_Y); i += 2) { const int index = outColumnOffset + i * STRIDE; // Write low coefficients from column into low band ... writer.writeLowInto(out, buffer[index] * scale97Div); // ... and high coeficients into the high band. writer.writeHighInto(out, buffer[index + STRIDE] * scale97Mul); } // Use last 7 remembered lines as first 7 lines for next iteration. // As expected, these lines are already horizontally transformed. for(int i = 0; i < 7; i++) { buffer[outColumnOffset + i * STRIDE] = last7Lines[i]; } // Wait for all writing threads before proceeding to loading new // pixels in next iteration. (Not to overwrite those which // are not written yet.) __syncthreads(); } } public: /// Runs one of specialized variants of 9/7 FDWT according to distance of /// processed pixels to image boudnary. Some variants do not check for /// boudnary and thus are slightly faster. 
/// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image /// @param steps number of steps of sliding window __device__ static void run(const float * const input, float * const output, const int sx, const int sy, const int steps) { // object with transform buffer in shared memory __shared__ FDWT97<WIN_SIZE_X, WIN_SIZE_Y> fdwt97; // Compute limits of this threadblock's block of pixels and use them to // determine, whether this threadblock will have to deal with boundary. // (3 in next expressions is for radius of impulse response of 9/7 FDWT.) const int maxX = (hipBlockIdx_x + 1) * WIN_SIZE_X + 3; const int maxY = (hipBlockIdx_y + 1) * WIN_SIZE_Y * steps + 3; const bool atRightBoudary = maxX >= sx; const bool atBottomBoudary = maxY >= sy; // Select specialized version of code according to distance of this // threadblock's pixels from image boundary. if(atBottomBoudary) { // near bottom boundary => check both writing and reading fdwt97.transform<true, true>(input, output, sx, sy, steps); } else if(atRightBoudary) { // near right boundary only => check writing only fdwt97.transform<false, true>(input, output, sx, sy, steps); } else { // no nearby boundary => check nothing fdwt97.transform<false, false>(input, output, sx, sy, steps); } } }; // end of class FDWT97 /// Main GPU 9/7 FDWT entry point. /// @param input input image /// @parma output output buffer /// @param sx width of the input image /// @param sy height of the input image /// @param steps number of steps of sliding window template <int WIN_SX, int WIN_SY> __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT97<WIN_SX, WIN_SY>), 8)) __global__ void fdwt97Kernel(hipLaunchParm lp, const float * const input, float * const output, const int sx, const int sy, const int steps) { // Excuse me, dear reader of this code - this call have to be here. If you // try to simply put contents of following method right here, CUDA compiler // (version 3.2) will spit tons of nonsense messy errors ... // Hope they will not break it even more in future releases. FDWT97<WIN_SX, WIN_SY>::run(input, output, sx, sy, steps); } /// Only computes optimal number of sliding window steps, /// number of threadblocks and then lanches the 9/7 FDWT kernel. /// @tparam WIN_SX width of sliding window /// @tparam WIN_SY height of sliding window /// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image template <int WIN_SX, int WIN_SY> void launchFDWT97Kernel (float * in, float * out, int sx, int sy) { // compute optimal number of steps of each sliding window const int steps = divRndUp(sy, 15 * WIN_SY); // prepare grid size dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); // run kernel, possibly measure time and finally check the call PERF_BEGIN hipLaunchKernel(HIP_KERNEL_NAME(fdwt97Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps); PERF_END(" FDWT97", sx, sy) CudaDWTTester::checkLastKernelCall("FDWT 9/7 kernel"); } /// Forward 9/7 2D DWT. See common rules (dwt.h) for more details. /// @param in Input DWT coefficients. Should be normalized (in range /// [-0.5, 0.5]). Will not be preserved (will be overwritten). 
/// @param out output buffer on GPU - format specified in common rules /// @param sizeX width of input image (in pixels) /// @param sizeY height of input image (in pixels) /// @param levels number of recursive DWT levels void fdwt97(float * in, float * out, int sizeX, int sizeY, int levels) { // select right width of kernel for the size of the image if(sizeX >= 960) { launchFDWT97Kernel<192, 8>(in, out, sizeX, sizeY); } else if (sizeX >= 480) { launchFDWT97Kernel<128, 6>(in, out, sizeX, sizeY); } else { launchFDWT97Kernel<64, 6>(in, out, sizeX, sizeY); } // if this was not the last level, continue recursively with other levels if(levels > 1) { // copy output's LL band back into input buffer const int llSizeX = divRndUp(sizeX, 2); const int llSizeY = divRndUp(sizeY, 2); memCopy(in, out, llSizeX, llSizeY); // run remaining levels of FDWT fdwt97(in, out, llSizeX, llSizeY, levels - 1); } } } // end of namespace dwt_cuda
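// --------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original file): driving the
// dwt_cuda::fdwt97 entry point defined above from HIP host code. The "dwt.h"
// header name, the image dimensions and the normalization assumption are
// illustrative only; the declaration comment above requires the input to be
// normalized to [-0.5, 0.5] and warns that the input buffer is overwritten.
#include <vector>
#include <hip/hip_runtime.h>
#include "dwt.h"   // assumed to declare dwt_cuda::fdwt97 and the output layout

static void run_fdwt97_example(const std::vector<float>& pixels, int sizeX, int sizeY) {
    const size_t bytes = pixels.size() * sizeof(float);

    float* d_in  = nullptr;
    float* d_out = nullptr;
    hipMalloc(reinterpret_cast<void**>(&d_in),  bytes);
    hipMalloc(reinterpret_cast<void**>(&d_out), bytes);

    // Input is expected in [-0.5, 0.5]; assume `pixels` is already normalized.
    hipMemcpy(d_in, pixels.data(), bytes, hipMemcpyHostToDevice);

    // Three recursive 9/7 levels; d_in is clobbered (LL bands are copied back).
    dwt_cuda::fdwt97(d_in, d_out, sizeX, sizeY, 3);

    std::vector<float> coeffs(pixels.size());
    hipMemcpy(coeffs.data(), d_out, bytes, hipMemcpyDeviceToHost);

    hipFree(d_in);
    hipFree(d_out);
}
// --------------------------------------------------------------------------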
31b504f38a3860c65c18b0847946810e1f272d99.cu
#include "hip_runtime.h" /// /// @file fdwt97.cu /// @brief CUDA implementation of forward 9/7 2D DWT. /// @author Martin Jirman ([email protected]) /// @date 2011-01-20 13:18 /// /// /// Copyright (c) 2011 Martin Jirman /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE /// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE /// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR /// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF /// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN /// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) /// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. /// #include "common.h" #include "transform_buffer.h" #include "io.h" namespace dwt_cuda { /// Wraps a buffer and methods for computing 9/7 FDWT with sliding window /// of specified size. Template arguments specify this size. /// @tparam WIN_SIZE_X width of sliding window /// @tparam WIN_SIZE_Y height of sliding window template <int WIN_SIZE_X, int WIN_SIZE_Y> class FDWT97 { private: /// Type of shared memory buffer used for 9/7 DWT. typedef TransformBuffer<float, WIN_SIZE_X, WIN_SIZE_Y + 7, 4> FDWT97Buffer; /// Actual shared buffer used for forward 9/7 DWT. FDWT97Buffer buffer; /// Difference of indices of two vertically neighboring items in buffer. enum { STRIDE = FDWT97Buffer::VERTICAL_STRIDE }; /// One thread's info about loading input image /// @tparam CHECKED true if loader should check for image boundaries template <bool CHECKED> struct FDWT97ColumnLoadingInfo { /// Loader of pixels from some input image. VerticalDWTPixelLoader<float, CHECKED> loader; /// Offset of column loaded by loader. (Offset in shared buffer.) int offset; }; /// Horizontal 9/7 FDWT on specified lines of transform buffer. /// @param lines number of lines to be transformed /// @param firstLine index of the first line to be transformed __device__ void horizontalFDWT97(const int lines, const int firstLine) { __syncthreads(); buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict1)); __syncthreads(); buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update1)); __syncthreads(); buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict2)); __syncthreads(); buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update2)); __syncthreads(); buffer.scaleHorizontal(scale97Div, scale97Mul, firstLine, lines); __syncthreads(); } /// Initializes one column of shared transform buffer with 7 input pixels. /// Those 7 pixels will not be transformed. Also initializes given loader. 
/// @tparam CHECKED true if loader should check for image boundaries /// @param column (uninitialized) object for loading input pixels /// @param columnIndex index (not offset!) of the column to be loaded /// (relative to threadblock's first column) /// @param input pointer to input image in GPU memory /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param firstY index of first row to be loaded from image template <bool CHECKED> __device__ void initColumn(FDWT97ColumnLoadingInfo<CHECKED> & column, const int columnIndex, const float * const input, const int sizeX, const int sizeY, const int firstY) { // get offset of the column with index 'columnIndex' column.offset = buffer.getColumnOffset(columnIndex); // x-coordinate of the first pixel to be loaded by given loader const int firstX = hipBlockIdx_x * WIN_SIZE_X + columnIndex; if(hipBlockIdx_y == 0) { // topmost block - apply mirroring rules when loading first 7 rows column.loader.init(sizeX, sizeY, firstX, firstY); // load pixels in mirrored way buffer[column.offset + 4 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 3 * STRIDE] = buffer[column.offset + 5 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 2 * STRIDE] = buffer[column.offset + 6 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 1 * STRIDE] = column.loader.loadFrom(input); buffer[column.offset + 0 * STRIDE] = column.loader.loadFrom(input); // reinitialize loader to start with pixel #3 again column.loader.init(sizeX, sizeY, firstX, firstY + 3); } else { // non-topmost row - regular loading: column.loader.init(sizeX, sizeY, firstX, firstY - 4); // load 7 rows into the transform buffer for(int i = 0; i < 7; i++) { buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); } } // Now, the next pixel, which will be loaded by loader, is pixel #3. } /// Loads another WIN_SIZE_Y pixels into given column using given loader. /// @tparam CHECKED true if loader should check for image boundaries /// @param input input image to load from /// @param column loader and offset of loaded column in shared buffer template <bool CHECKED> inline __device__ void loadWindowIntoColumn(const float * const input, FDWT97ColumnLoadingInfo<CHECKED> & column) { for(int i = 7; i < (7 + WIN_SIZE_Y); i++) { buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); } } /// Main GPU 9/7 FDWT entry point. /// @tparam CHECK_LOADS true if boundaries should be checked when loading /// @tparam CHECK_WRITES true if boundaries should be checked when writing /// @param in input image /// @param out output buffer /// @param sizeX width of the input image /// @param sizeY height of the input image /// @param winSteps number of steps of sliding window template <bool CHECK_LOADS, bool CHECK_WRITES> __device__ void transform(const float * const in, float * const out, const int sizeX, const int sizeY, const int winSteps) { // info about columns loaded by this thread: one main column and possibly // one boundary column. (Only some threads load some boundary column.) FDWT97ColumnLoadingInfo<CHECK_LOADS> loadedColumn; FDWT97ColumnLoadingInfo<CHECK_LOADS> boundaryColumn; // Initialize first 7 lines of transform buffer. const int firstY = hipBlockIdx_y * WIN_SIZE_Y * winSteps; initColumn(loadedColumn, hipThreadIdx_x, in, sizeX, sizeY, firstY); // Some threads initialize boundary columns. 
boundaryColumn.offset = 0; boundaryColumn.loader.clear(); if(hipThreadIdx_x < 7) { // each thread among first 7 ones gets index of one of boundary columns const int colId = hipThreadIdx_x + ((hipThreadIdx_x < 3) ? WIN_SIZE_X : -7); // Thread initializes offset of the boundary column (in shared buffer), // first 7 pixels of the column and a loader for this column. initColumn(boundaryColumn, colId, in, sizeX, sizeY, firstY); } // horizontally transform first 7 rows in all columns horizontalFDWT97(7, 0); // Index of column handled by this thread. (First half of threads handle // even columns and others handle odd columns.) const int outColumnIndex = parityIdx<WIN_SIZE_X>(); // writer of output linear bands - initialize it const int firstX = hipBlockIdx_x * WIN_SIZE_X + outColumnIndex; VerticalDWTBandWriter<float, CHECK_WRITES> writer; writer.init(sizeX, sizeY, firstX, firstY); // transform buffer offset of column transformed and saved by this thread const int outColumnOffset = buffer.getColumnOffset(outColumnIndex); // (Each iteration of this loop assumes that first 7 rows of transform // buffer are already loaded with horizontally transformed coefficients.) for(int w = 0; w < winSteps; w++) { // Load another WIN_SIZE_Y lines of thread's column into the buffer. loadWindowIntoColumn(in, loadedColumn); // some threads also load boundary columns if(hipThreadIdx_x < 7) { loadWindowIntoColumn(in, boundaryColumn); } // horizontally transform all newly loaded lines horizontalFDWT97(WIN_SIZE_Y, 7); // Using 7 registers, remember current values of last 7 rows of // transform buffer. These rows are transformed horizontally only // and will be used in next iteration. float last7Lines[7]; for(int i = 0; i < 7; i++) { last7Lines[i] = buffer[outColumnOffset + (WIN_SIZE_Y + i) * STRIDE]; } // vertically transform all central columns (do not scale yet) buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict1)); buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update1)); buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict2)); buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update2)); // Save all results of current window. Results are in transform buffer // at rows from #4 to #(4 + WIN_SIZE_Y). Other rows are invalid now. // (They only served as a boundary for vertical FDWT.) for(int i = 4; i < (4 + WIN_SIZE_Y); i += 2) { const int index = outColumnOffset + i * STRIDE; // Write low coefficients from column into low band ... writer.writeLowInto(out, buffer[index] * scale97Div); // ... and high coeficients into the high band. writer.writeHighInto(out, buffer[index + STRIDE] * scale97Mul); } // Use last 7 remembered lines as first 7 lines for next iteration. // As expected, these lines are already horizontally transformed. for(int i = 0; i < 7; i++) { buffer[outColumnOffset + i * STRIDE] = last7Lines[i]; } // Wait for all writing threads before proceeding to loading new // pixels in next iteration. (Not to overwrite those which // are not written yet.) __syncthreads(); } } public: /// Runs one of specialized variants of 9/7 FDWT according to distance of /// processed pixels to image boudnary. Some variants do not check for /// boudnary and thus are slightly faster. 
/// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image /// @param steps number of steps of sliding window __device__ static void run(const float * const input, float * const output, const int sx, const int sy, const int steps) { // object with transform buffer in shared memory __shared__ FDWT97<WIN_SIZE_X, WIN_SIZE_Y> fdwt97; // Compute limits of this threadblock's block of pixels and use them to // determine, whether this threadblock will have to deal with boundary. // (3 in next expressions is for radius of impulse response of 9/7 FDWT.) const int maxX = (hipBlockIdx_x + 1) * WIN_SIZE_X + 3; const int maxY = (hipBlockIdx_y + 1) * WIN_SIZE_Y * steps + 3; const bool atRightBoudary = maxX >= sx; const bool atBottomBoudary = maxY >= sy; // Select specialized version of code according to distance of this // threadblock's pixels from image boundary. if(atBottomBoudary) { // near bottom boundary => check both writing and reading fdwt97.transform<true, true>(input, output, sx, sy, steps); } else if(atRightBoudary) { // near right boundary only => check writing only fdwt97.transform<false, true>(input, output, sx, sy, steps); } else { // no nearby boundary => check nothing fdwt97.transform<false, false>(input, output, sx, sy, steps); } } }; // end of class FDWT97 /// Main GPU 9/7 FDWT entry point. /// @param input input image /// @parma output output buffer /// @param sx width of the input image /// @param sy height of the input image /// @param steps number of steps of sliding window template <int WIN_SX, int WIN_SY> __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT97<WIN_SX, WIN_SY>), 8)) __global__ void fdwt97Kernel(hipLaunchParm lp, const float * const input, float * const output, const int sx, const int sy, const int steps) { // Excuse me, dear reader of this code - this call have to be here. If you // try to simply put contents of following method right here, CUDA compiler // (version 3.2) will spit tons of nonsense messy errors ... // Hope they will not break it even more in future releases. FDWT97<WIN_SX, WIN_SY>::run(input, output, sx, sy, steps); } /// Only computes optimal number of sliding window steps, /// number of threadblocks and then lanches the 9/7 FDWT kernel. /// @tparam WIN_SX width of sliding window /// @tparam WIN_SY height of sliding window /// @param in input image /// @param out output buffer /// @param sx width of the input image /// @param sy height of the input image template <int WIN_SX, int WIN_SY> void launchFDWT97Kernel (float * in, float * out, int sx, int sy) { // compute optimal number of steps of each sliding window const int steps = divRndUp(sy, 15 * WIN_SY); // prepare grid size dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); // run kernel, possibly measure time and finally check the call PERF_BEGIN hipLaunchKernel(HIP_KERNEL_NAME(fdwt97Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps); PERF_END(" FDWT97", sx, sy) CudaDWTTester::checkLastKernelCall("FDWT 9/7 kernel"); } /// Forward 9/7 2D DWT. See common rules (dwt.h) for more details. /// @param in Input DWT coefficients. Should be normalized (in range /// [-0.5, 0.5]). Will not be preserved (will be overwritten). 
/// @param out output buffer on GPU - format specified in common rules /// @param sizeX width of input image (in pixels) /// @param sizeY height of input image (in pixels) /// @param levels number of recursive DWT levels void fdwt97(float * in, float * out, int sizeX, int sizeY, int levels) { // select right width of kernel for the size of the image if(sizeX >= 960) { launchFDWT97Kernel<192, 8>(in, out, sizeX, sizeY); } else if (sizeX >= 480) { launchFDWT97Kernel<128, 6>(in, out, sizeX, sizeY); } else { launchFDWT97Kernel<64, 6>(in, out, sizeX, sizeY); } // if this was not the last level, continue recursively with other levels if(levels > 1) { // copy output's LL band back into input buffer const int llSizeX = divRndUp(sizeX, 2); const int llSizeY = divRndUp(sizeY, 2); memCopy(in, out, llSizeX, llSizeY); // run remaining levels of FDWT fdwt97(in, out, llSizeX, llSizeY, levels - 1); } } } // end of namespace dwt_cuda
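// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the launch geometry
// computed by launchFDWT97Kernel above, reproduced as standalone host code so
// the numbers can be checked without the rest of the project. divRndUp is
// assumed to be a ceiling division (that is how it is used above); the
// 1920x1080 image size is an arbitrary example.
#include <cstdio>

static int ceil_div(int a, int b) { return (a + b - 1) / b; }

static void fdwt97_launch_geometry_example() {
    const int sx = 1920, sy = 1080;      // image size (sx >= 960 -> <192, 8>)
    const int WIN_SX = 192, WIN_SY = 8;  // sliding-window template parameters

    const int steps  = ceil_div(sy, 15 * WIN_SY);    // 9 window steps
    const int grid_x = ceil_div(sx, WIN_SX);         // 10 blocks across
    const int grid_y = ceil_div(sy, WIN_SY * steps); // 15 blocks down

    // 10 x 15 = 150 blocks of WIN_SX = 192 threads, each block sliding its
    // WIN_SY-row window `steps` times over a 72-row strip of the image.
    std::printf("steps=%d grid=(%d,%d) block=%d\n", steps, grid_x, grid_y, WIN_SX);
}
// --------------------------------------------------------------------------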
03188975cfd20dc7a1040219002a2ee48813fafc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vel_step.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float4 __restrict__ *deviceVel = NULL; hipMalloc(&deviceVel, XSIZE*YSIZE); float3 __restrict__ *accels = NULL; hipMalloc(&accels, XSIZE*YSIZE); unsigned int numBodies = 1; float dt = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vel_step), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceVel,accels,numBodies,dt); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vel_step), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceVel,accels,numBodies,dt); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vel_step), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceVel,accels,numBodies,dt); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
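// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original benchmark): kernel launches
// are asynchronous, so reading steady_clock::now() immediately after the
// 1000-iteration loop above mostly measures launch overhead rather than
// kernel execution time. The helper below shows one way to time N launches
// of an arbitrary launcher with a device synchronization before the end
// timestamp; the std::function callback is an assumption for illustration.
#include <chrono>
#include <functional>
#include <hip/hip_runtime.h>

static float time_launches_us(const std::function<void()>& launch, int iterations) {
    using namespace std::chrono;
    launch();                        // one warm-up launch
    hipDeviceSynchronize();          // make sure warm-up work has finished
    const auto start = steady_clock::now();
    for (int i = 0; i < iterations; ++i) {
        launch();                    // enqueue the kernel asynchronously
    }
    hipDeviceSynchronize();          // wait for every enqueued kernel
    const auto end = steady_clock::now();
    return duration_cast<duration<float, std::micro>>(end - start).count();
}
// --------------------------------------------------------------------------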
03188975cfd20dc7a1040219002a2ee48813fafc.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vel_step.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float4 __restrict__ *deviceVel = NULL; cudaMalloc(&deviceVel, XSIZE*YSIZE); float3 __restrict__ *accels = NULL; cudaMalloc(&accels, XSIZE*YSIZE); unsigned int numBodies = 1; float dt = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vel_step<<<gridBlock,threadBlock>>>(deviceVel,accels,numBodies,dt); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vel_step<<<gridBlock,threadBlock>>>(deviceVel,accels,numBodies,dt); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vel_step<<<gridBlock,threadBlock>>>(deviceVel,accels,numBodies,dt); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
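// --------------------------------------------------------------------------
// Illustrative alternative (not part of the original benchmark): CUDA events
// record timestamps on the GPU timeline, so the elapsed time covers the
// enqueued kernels themselves rather than host-side launch overhead. The
// std::function callback is an assumption for illustration; error checking
// is omitted.
#include <functional>
#include <cuda_runtime.h>

static float time_launches_with_events_ms(const std::function<void()>& launch,
                                          int iterations) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);          // timestamp before the batch
    for (int i = 0; i < iterations; ++i) {
        launch();                    // enqueue the kernel asynchronously
    }
    cudaEventRecord(stop);           // timestamp after the last launch
    cudaEventSynchronize(stop);      // wait until the stop event has happened

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // milliseconds between events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}
// --------------------------------------------------------------------------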
cbc1295d2e2a50fc4218dabe68f4a2e7d76464a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_targets.cuh" #include <catboost/cuda/cuda_lib/kernel/kernel.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> namespace NKernel { struct TQuantileTarget { float Alpha; __host__ __device__ __forceinline__ TQuantileTarget(float alpha = 0.5) : Alpha(alpha) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? Alpha : -(1 - Alpha); return multiplier * val; } __device__ __forceinline__ float Der(float target, float prediction) const { const float val = target - prediction; return (val > 0) ? Alpha : -(1.0f - Alpha); } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; struct TTweedieTarget { float VariancePower; __host__ __device__ __forceinline__ TTweedieTarget(float variancePower = 1.5) : VariancePower(variancePower) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = -target * ::exp((1 - VariancePower) * prediction) / (1 - VariancePower); const float delta = ::exp((2 - VariancePower) * prediction) / (2 - VariancePower); return val + delta; } __device__ __forceinline__ float Der(float target, float prediction) const { const float der = target * ::exp((1 - VariancePower) * prediction); const float delta = ::exp((2 - VariancePower) * prediction); return der - delta; } __device__ __forceinline__ float Der2(float target, float prediction) const { const float der2 = target * ::exp((1 - VariancePower) * prediction) * (1 - VariancePower); const float delta = ::exp((2 - VariancePower) * prediction) * (2 - VariancePower); return der2 - delta; } }; struct THuberTarget { static constexpr double HUBER_DER2 = -1.0; float Delta; __host__ __device__ __forceinline__ THuberTarget(float delta) : Delta(delta) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float targetMismatch = fabs(target - prediction); if (targetMismatch < Delta) { return 0.5 * targetMismatch * targetMismatch; } else { return Delta * (targetMismatch - 0.5 * Delta); } } __device__ __forceinline__ float Der(float target, float prediction) const { const float diff = target - prediction; if (fabs(diff) < Delta) { return diff; } else { return diff > 0.0 ? Delta : -Delta; } } __device__ __forceinline__ float Der2(float target, float prediction) const { const float diff = target - prediction; if (fabs(diff) < Delta) { return HUBER_DER2; } else { return 0.0; } } }; struct TExpectileTarget { float Alpha; __host__ __device__ __forceinline__ TExpectileTarget(float alpha = 0.5) : Alpha(alpha) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? Alpha : (1 - Alpha); return multiplier * val * val; } __device__ __forceinline__ float Der(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? Alpha : (1 - Alpha); return 2.0 * multiplier * val; } __device__ __forceinline__ float Der2(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? 
Alpha : (1 - Alpha); return 2.0 * multiplier; } }; struct TLogLinQuantileTarget { float Alpha; __host__ __device__ __forceinline__ TLogLinQuantileTarget(float alpha = 0.5) : Alpha(alpha) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = target - __expf(prediction); const float multiplier = (val > 0) ? Alpha : -(1 - Alpha); return val * multiplier; } __device__ __forceinline__ float Der(float target, float prediction) const { const float expPred = __expf(prediction); return (target - expPred > 0) ? Alpha * expPred : -(1 - Alpha) * expPred; } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; struct TMAPETarget { __device__ __forceinline__ float Score(float target, float prediction) const { return abs(target - prediction) / max(1.f, abs(target)); } __device__ __forceinline__ float Der(float target, float prediction) const { return (target - prediction > 0) ? 1.0f / max(1.f, abs(target)) : -1.0f / max(1.f, abs(target)); } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; struct TPoissonTarget { __device__ __forceinline__ float Score(float target, float prediction) const { return (__expf(prediction) - target * prediction); } __device__ __forceinline__ float Der(float target, float prediction) const { const float expPred = __expf(prediction); return target - expPred; } __device__ __forceinline__ float Der2(float, float prediction) const { return __expf(prediction); } }; struct TRmseTarget { __device__ __forceinline__ float Score(float target, float prediction) const { return (target - prediction) * (target - prediction); } __device__ __forceinline__ float Der(float target, float prediction) const { return target - prediction; } __device__ __forceinline__ float Der2(float, float prediction) const { return 1.0f; } }; __forceinline__ __device__ float sign(float x) { return x > 0 ? 1.0f : -1.0f; } struct TLqTarget { __host__ __device__ __forceinline__ TLqTarget(float q) : Q(q) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float absLoss = abs(target - prediction); return __powf(absLoss, Q); } __device__ __forceinline__ float Der(float target, float prediction) const { const float absLoss = abs(target - prediction); float absLossQ = powf(absLoss, Q - 1); return Q * sign(target - prediction) * absLossQ; } __device__ __forceinline__ float Der2(float target, float prediction) const { const float absLoss = abs(target - prediction); return Q >= 2 ? Q * (Q - 1) * powf(absLoss, Q - 2) : 1.0f; } float Q = 2; }; struct TNumErrorsMetric { float K; __host__ __device__ __forceinline__ TNumErrorsMetric(float k) : K(k) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = abs(target - prediction); return val > K ? 1 : 0; } __device__ __forceinline__ float Der(float, float) const { return 0; } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; template <class TTarget, int BLOCK_SIZE> __global__ void PointwiseTargetImpl(const float* relevs, const float* weights, ui32 size, const float* predictions, TTarget target, float* functionValue, float* der, float* der2) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float tmpScores[BLOCK_SIZE]; const float val = i < size ? predictions[i] : 0; const float relev = i < size ? relevs[i] : 0; const float weight = (weights && (i < size)) ? 
weights[i] : 1.0f; if (i < size) { if (der) { der[i] = weight * target.Der(relev, val); } if (der2) { der2[i] = weight * target.Der2(relev, val); } } if (functionValue) { tmpScores[threadIdx.x] = (i < size) ? -weight * target.Score(relev, val) : 0; __syncthreads(); } if (functionValue) { float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE); if (threadIdx.x == 0) { atomicAdd(functionValue, val); } } } template <int BLOCK_SIZE> __global__ void MseImpl(const float* relevs, const float* weights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float tmpScores[BLOCK_SIZE]; const float val = i < size ? predictions[i] : 0; const float relev = i < size ? relevs[i] : 0; const float direction = relev - val; const float weight = (weights && (i < size)) ? weights[i] : 1.0f; if (i < size) { if (der) { der[i] = weight * direction; } if (der2) { der2[i] = weight; } } if (functionValue) { tmpScores[threadIdx.x] = (i < size) ? -weight * (val - relev) * (val - relev) : 0; __syncthreads(); } if (functionValue) { float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE); if (threadIdx.x == 0) { atomicAdd(functionValue, val); } } } template <int BLOCK_SIZE, int ELEMENTS_PER_THREAD, bool HAS_BORDER> __launch_bounds__(BLOCK_SIZE, 2048 / BLOCK_SIZE) __global__ void CrossEntropyImpl(const float* targetClasses, const float* targetWeights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2, float border) { ui32 tid = blockIdx.x * BLOCK_SIZE * ELEMENTS_PER_THREAD + threadIdx.x; float tmpScore = 0; float direction[ELEMENTS_PER_THREAD]; float weight[ELEMENTS_PER_THREAD]; float scale[ELEMENTS_PER_THREAD]; #pragma unroll for (int j = 0; j < ELEMENTS_PER_THREAD; ++j) { const int idx = tid + j * BLOCK_SIZE; direction[j] = idx < size ? predictions[idx] : 0; weight[j] = (targetWeights && (idx < size)) ? targetWeights[idx] : 1.0f; scale[j] = (idx < size) ? targetClasses[idx] : 1.0f; } #pragma unroll for (int j = 0; j < ELEMENTS_PER_THREAD; ++j) { const int idx = tid + j * BLOCK_SIZE; const float val = direction[j]; const float targetClass = scale[j]; const float expVal = idx < size ? __expf(val) : 0; const float p = max(min(isfinite(expVal) ? expVal / (1.0f + expVal) : 1.0f, 1.0f - 1e-40f), 1e-40f); const float c = HAS_BORDER ? targetClass > border : targetClass; direction[j] = c - p; //c * (1 - p) - (1-c) * p; scale[j] = p * (1.0f - p); if (functionValue) { const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val; tmpScore += (idx < size) ? 
weight[j] * (c * val - logExpValPlusOne) : 0; } } #pragma unroll for (int j = 0; j < ELEMENTS_PER_THREAD; ++j) { const int idx = tid + j * BLOCK_SIZE; //we already classify this observations if (der && (idx < size)) { der[idx] = weight[j] * direction[j]; } if (der2 && (idx < size)) { der2[idx] = weight[j] * scale[j]; } } if (functionValue) { __shared__ float tmpScores[BLOCK_SIZE]; tmpScores[threadIdx.x] = tmpScore; __syncthreads(); float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE); if (threadIdx.x == 0) { atomicAdd(functionValue, val); } } } void CrossEntropyTargetKernel(const float* targetClasses, const float* targetWeights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2, float border, bool useBorder, TCudaStream stream) { const ui32 blockSize = 512; const ui32 elementsPerThreads = 2; const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize); //TODO: get rid of this if (functionValue) { FillBuffer(functionValue, 0.0f, 1, stream); } if (useBorder) { hipLaunchKernelGGL(( CrossEntropyImpl < blockSize, elementsPerThreads, true >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, targetWeights, size, predictions, functionValue, der, der2, border); } else { hipLaunchKernelGGL(( CrossEntropyImpl < blockSize, elementsPerThreads, false >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, targetWeights, size, predictions, functionValue, der, der2, border); } } void MseTargetKernel(const float* relevs, const float* weights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2, TCudaStream stream) { const ui32 blockSize = 1024; const ui32 numBlocks = (size + blockSize - 1) / blockSize; //TODO: get rid of this if (functionValue) { FillBuffer(functionValue, 0.0f, 1, stream); } hipLaunchKernelGGL(( MseImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, relevs, weights, size, predictions, functionValue, der, der2); } template <int BLOCK_SIZE, class TTarget> void RunPointwiseTargetKernel(const float* relevs, const float* weights, ui32 size, TTarget target, const float* predictions, float* functionValue, float* der, float* der2, TCudaStream stream) { const ui32 numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; hipLaunchKernelGGL(( PointwiseTargetImpl<TTarget, BLOCK_SIZE>), dim3(numBlocks), dim3(BLOCK_SIZE), 0, stream, relevs, weights, size, predictions, target, functionValue, der, der2); } #define POINTWISE_TARGET() \ RunPointwiseTargetKernel<blockSize>(relevs, weights, size, target, predictions, functionValue, der, der2, stream); void PointwiseTargetKernel(const float* relevs, const float* weights, ui32 size, ELossFunction loss, float alpha, const float* predictions, float* functionValue, float* der, float* der2, TCudaStream stream) { const ui32 blockSize = 1024; switch (loss) { case ELossFunction::Expectile: { TExpectileTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::Quantile: case ELossFunction::MAE: { TQuantileTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::LogLinQuantile: { TLogLinQuantileTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::MAPE: { TMAPETarget target; POINTWISE_TARGET() break; } case ELossFunction::Poisson: { TPoissonTarget target; POINTWISE_TARGET() break; } case ELossFunction::Lq: { TLqTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::RMSE: { TRmseTarget target; POINTWISE_TARGET() break; } case ELossFunction::NumErrors: { TNumErrorsMetric target(alpha); 
POINTWISE_TARGET() break; } case ELossFunction::Tweedie: { TTweedieTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::Huber: { THuberTarget target(alpha); POINTWISE_TARGET() break; } default: { Y_VERIFY(false, "Unknown target"); } } } }
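// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): how the host
// entry point PointwiseTargetKernel above might be driven for a Quantile
// loss. The device buffer names and the size are assumptions for this
// example; the kernel guards every optional pointer, so weights,
// functionValue, der and der2 may each be passed as nullptr when they are
// not needed. functionValue is accumulated with atomicAdd, so the caller is
// expected to zero it first.
//
//   using namespace NKernel;
//   const ui32 size   = 4096;               // number of documents
//   const float alpha = 0.9f;               // quantile level
//   PointwiseTargetKernel(d_relevs,         // targets on the device
//                         d_weights,        // nullptr -> weight 1.0 per doc
//                         size,
//                         ELossFunction::Quantile,
//                         alpha,
//                         d_approx,         // current predictions
//                         d_funcValue,      // single-float score accumulator
//                         d_der,            // weight * Der(target, approx)
//                         d_der2,           // weight * Der2(target, approx)
//                         stream);          // TCudaStream to run on
// --------------------------------------------------------------------------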
cbc1295d2e2a50fc4218dabe68f4a2e7d76464a1.cu
#include "pointwise_targets.cuh" #include <catboost/cuda/cuda_lib/kernel/kernel.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> namespace NKernel { struct TQuantileTarget { float Alpha; __host__ __device__ __forceinline__ TQuantileTarget(float alpha = 0.5) : Alpha(alpha) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? Alpha : -(1 - Alpha); return multiplier * val; } __device__ __forceinline__ float Der(float target, float prediction) const { const float val = target - prediction; return (val > 0) ? Alpha : -(1.0f - Alpha); } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; struct TTweedieTarget { float VariancePower; __host__ __device__ __forceinline__ TTweedieTarget(float variancePower = 1.5) : VariancePower(variancePower) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = -target * std::exp((1 - VariancePower) * prediction) / (1 - VariancePower); const float delta = std::exp((2 - VariancePower) * prediction) / (2 - VariancePower); return val + delta; } __device__ __forceinline__ float Der(float target, float prediction) const { const float der = target * std::exp((1 - VariancePower) * prediction); const float delta = std::exp((2 - VariancePower) * prediction); return der - delta; } __device__ __forceinline__ float Der2(float target, float prediction) const { const float der2 = target * std::exp((1 - VariancePower) * prediction) * (1 - VariancePower); const float delta = std::exp((2 - VariancePower) * prediction) * (2 - VariancePower); return der2 - delta; } }; struct THuberTarget { static constexpr double HUBER_DER2 = -1.0; float Delta; __host__ __device__ __forceinline__ THuberTarget(float delta) : Delta(delta) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float targetMismatch = fabs(target - prediction); if (targetMismatch < Delta) { return 0.5 * targetMismatch * targetMismatch; } else { return Delta * (targetMismatch - 0.5 * Delta); } } __device__ __forceinline__ float Der(float target, float prediction) const { const float diff = target - prediction; if (fabs(diff) < Delta) { return diff; } else { return diff > 0.0 ? Delta : -Delta; } } __device__ __forceinline__ float Der2(float target, float prediction) const { const float diff = target - prediction; if (fabs(diff) < Delta) { return HUBER_DER2; } else { return 0.0; } } }; struct TExpectileTarget { float Alpha; __host__ __device__ __forceinline__ TExpectileTarget(float alpha = 0.5) : Alpha(alpha) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? Alpha : (1 - Alpha); return multiplier * val * val; } __device__ __forceinline__ float Der(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? Alpha : (1 - Alpha); return 2.0 * multiplier * val; } __device__ __forceinline__ float Der2(float target, float prediction) const { const float val = target - prediction; const float multiplier = (val > 0) ? 
Alpha : (1 - Alpha); return 2.0 * multiplier; } }; struct TLogLinQuantileTarget { float Alpha; __host__ __device__ __forceinline__ TLogLinQuantileTarget(float alpha = 0.5) : Alpha(alpha) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = target - __expf(prediction); const float multiplier = (val > 0) ? Alpha : -(1 - Alpha); return val * multiplier; } __device__ __forceinline__ float Der(float target, float prediction) const { const float expPred = __expf(prediction); return (target - expPred > 0) ? Alpha * expPred : -(1 - Alpha) * expPred; } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; struct TMAPETarget { __device__ __forceinline__ float Score(float target, float prediction) const { return abs(target - prediction) / max(1.f, abs(target)); } __device__ __forceinline__ float Der(float target, float prediction) const { return (target - prediction > 0) ? 1.0f / max(1.f, abs(target)) : -1.0f / max(1.f, abs(target)); } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; struct TPoissonTarget { __device__ __forceinline__ float Score(float target, float prediction) const { return (__expf(prediction) - target * prediction); } __device__ __forceinline__ float Der(float target, float prediction) const { const float expPred = __expf(prediction); return target - expPred; } __device__ __forceinline__ float Der2(float, float prediction) const { return __expf(prediction); } }; struct TRmseTarget { __device__ __forceinline__ float Score(float target, float prediction) const { return (target - prediction) * (target - prediction); } __device__ __forceinline__ float Der(float target, float prediction) const { return target - prediction; } __device__ __forceinline__ float Der2(float, float prediction) const { return 1.0f; } }; __forceinline__ __device__ float sign(float x) { return x > 0 ? 1.0f : -1.0f; } struct TLqTarget { __host__ __device__ __forceinline__ TLqTarget(float q) : Q(q) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float absLoss = abs(target - prediction); return __powf(absLoss, Q); } __device__ __forceinline__ float Der(float target, float prediction) const { const float absLoss = abs(target - prediction); float absLossQ = powf(absLoss, Q - 1); return Q * sign(target - prediction) * absLossQ; } __device__ __forceinline__ float Der2(float target, float prediction) const { const float absLoss = abs(target - prediction); return Q >= 2 ? Q * (Q - 1) * powf(absLoss, Q - 2) : 1.0f; } float Q = 2; }; struct TNumErrorsMetric { float K; __host__ __device__ __forceinline__ TNumErrorsMetric(float k) : K(k) { } __device__ __forceinline__ float Score(float target, float prediction) const { const float val = abs(target - prediction); return val > K ? 1 : 0; } __device__ __forceinline__ float Der(float, float) const { return 0; } __device__ __forceinline__ float Der2(float, float) const { return 0; } }; template <class TTarget, int BLOCK_SIZE> __global__ void PointwiseTargetImpl(const float* relevs, const float* weights, ui32 size, const float* predictions, TTarget target, float* functionValue, float* der, float* der2) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float tmpScores[BLOCK_SIZE]; const float val = i < size ? predictions[i] : 0; const float relev = i < size ? relevs[i] : 0; const float weight = (weights && (i < size)) ? 
weights[i] : 1.0f; if (i < size) { if (der) { der[i] = weight * target.Der(relev, val); } if (der2) { der2[i] = weight * target.Der2(relev, val); } } if (functionValue) { tmpScores[threadIdx.x] = (i < size) ? -weight * target.Score(relev, val) : 0; __syncthreads(); } if (functionValue) { float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE); if (threadIdx.x == 0) { atomicAdd(functionValue, val); } } } template <int BLOCK_SIZE> __global__ void MseImpl(const float* relevs, const float* weights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float tmpScores[BLOCK_SIZE]; const float val = i < size ? predictions[i] : 0; const float relev = i < size ? relevs[i] : 0; const float direction = relev - val; const float weight = (weights && (i < size)) ? weights[i] : 1.0f; if (i < size) { if (der) { der[i] = weight * direction; } if (der2) { der2[i] = weight; } } if (functionValue) { tmpScores[threadIdx.x] = (i < size) ? -weight * (val - relev) * (val - relev) : 0; __syncthreads(); } if (functionValue) { float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE); if (threadIdx.x == 0) { atomicAdd(functionValue, val); } } } template <int BLOCK_SIZE, int ELEMENTS_PER_THREAD, bool HAS_BORDER> __launch_bounds__(BLOCK_SIZE, 2048 / BLOCK_SIZE) __global__ void CrossEntropyImpl(const float* targetClasses, const float* targetWeights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2, float border) { ui32 tid = blockIdx.x * BLOCK_SIZE * ELEMENTS_PER_THREAD + threadIdx.x; float tmpScore = 0; float direction[ELEMENTS_PER_THREAD]; float weight[ELEMENTS_PER_THREAD]; float scale[ELEMENTS_PER_THREAD]; #pragma unroll for (int j = 0; j < ELEMENTS_PER_THREAD; ++j) { const int idx = tid + j * BLOCK_SIZE; direction[j] = idx < size ? predictions[idx] : 0; weight[j] = (targetWeights && (idx < size)) ? targetWeights[idx] : 1.0f; scale[j] = (idx < size) ? targetClasses[idx] : 1.0f; } #pragma unroll for (int j = 0; j < ELEMENTS_PER_THREAD; ++j) { const int idx = tid + j * BLOCK_SIZE; const float val = direction[j]; const float targetClass = scale[j]; const float expVal = idx < size ? __expf(val) : 0; const float p = max(min(isfinite(expVal) ? expVal / (1.0f + expVal) : 1.0f, 1.0f - 1e-40f), 1e-40f); const float c = HAS_BORDER ? targetClass > border : targetClass; direction[j] = c - p; //c * (1 - p) - (1-c) * p; scale[j] = p * (1.0f - p); if (functionValue) { const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val; tmpScore += (idx < size) ? 
weight[j] * (c * val - logExpValPlusOne) : 0; } } #pragma unroll for (int j = 0; j < ELEMENTS_PER_THREAD; ++j) { const int idx = tid + j * BLOCK_SIZE; //we already classify this observations if (der && (idx < size)) { der[idx] = weight[j] * direction[j]; } if (der2 && (idx < size)) { der2[idx] = weight[j] * scale[j]; } } if (functionValue) { __shared__ float tmpScores[BLOCK_SIZE]; tmpScores[threadIdx.x] = tmpScore; __syncthreads(); float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE); if (threadIdx.x == 0) { atomicAdd(functionValue, val); } } } void CrossEntropyTargetKernel(const float* targetClasses, const float* targetWeights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2, float border, bool useBorder, TCudaStream stream) { const ui32 blockSize = 512; const ui32 elementsPerThreads = 2; const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize); //TODO: get rid of this if (functionValue) { FillBuffer(functionValue, 0.0f, 1, stream); } if (useBorder) { CrossEntropyImpl < blockSize, elementsPerThreads, true ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, targetWeights, size, predictions, functionValue, der, der2, border); } else { CrossEntropyImpl < blockSize, elementsPerThreads, false ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, targetWeights, size, predictions, functionValue, der, der2, border); } } void MseTargetKernel(const float* relevs, const float* weights, ui32 size, const float* predictions, float* functionValue, float* der, float* der2, TCudaStream stream) { const ui32 blockSize = 1024; const ui32 numBlocks = (size + blockSize - 1) / blockSize; //TODO: get rid of this if (functionValue) { FillBuffer(functionValue, 0.0f, 1, stream); } MseImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(relevs, weights, size, predictions, functionValue, der, der2); } template <int BLOCK_SIZE, class TTarget> void RunPointwiseTargetKernel(const float* relevs, const float* weights, ui32 size, TTarget target, const float* predictions, float* functionValue, float* der, float* der2, TCudaStream stream) { const ui32 numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE; PointwiseTargetImpl<TTarget, BLOCK_SIZE><<<numBlocks, BLOCK_SIZE, 0, stream>>>(relevs, weights, size, predictions, target, functionValue, der, der2); } #define POINTWISE_TARGET() \ RunPointwiseTargetKernel<blockSize>(relevs, weights, size, target, predictions, functionValue, der, der2, stream); void PointwiseTargetKernel(const float* relevs, const float* weights, ui32 size, ELossFunction loss, float alpha, const float* predictions, float* functionValue, float* der, float* der2, TCudaStream stream) { const ui32 blockSize = 1024; switch (loss) { case ELossFunction::Expectile: { TExpectileTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::Quantile: case ELossFunction::MAE: { TQuantileTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::LogLinQuantile: { TLogLinQuantileTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::MAPE: { TMAPETarget target; POINTWISE_TARGET() break; } case ELossFunction::Poisson: { TPoissonTarget target; POINTWISE_TARGET() break; } case ELossFunction::Lq: { TLqTarget target(alpha); POINTWISE_TARGET() break; } case ELossFunction::RMSE: { TRmseTarget target; POINTWISE_TARGET() break; } case ELossFunction::NumErrors: { TNumErrorsMetric target(alpha); POINTWISE_TARGET() break; } case ELossFunction::Tweedie: { TTweedieTarget target(alpha); POINTWISE_TARGET() break; } case 
ELossFunction::Huber: { THuberTarget target(alpha); POINTWISE_TARGET() break; } default: { Y_VERIFY(false, "Unknown target"); } } } }
89207695d94217dc701fcdd9c2bfe25b77172bb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <rocblas.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include <algorithm> #include <string> // #define SHARED_ #define pow_2(x) ( ((x) * (x)) ) // ####### BLOCK SIZE ###### #define BLOCK_SIZE_BIG_750 512 #define BLOCK_SIZE_BIG_480 256 #define BLOCK_SIZE_SMALL_750 96 #define BLOCK_SIZE_SMALL_480 64 // #define BLOCK_SIZE_BIG_480 10 // #define BLOCK_SIZE_SMALL_750 4 // #define BLOCK_SIZE_SMALL_480 10 // ######################### // ####### UNROLLING ####### #define UNROLL_N_BIG_750 16 #define UNROLL_N_BIG_480 16 // #define UNROLL_N_BIG_750 1 // #define UNROLL_N_BIG_480 1 #define UNROLL_N_SMALL_750 8 #define UNROLL_N_SMALL_480 32 // #define UNROLL_N_SMALL_750 1 // #define UNROLL_N_SMALL_480 1 // ######################### // ####### UNROLLING ####### #define INNER_N_BIG_750 4 #define INNER_N_BIG_480 4 #define INNER_N_SMALL_750 2 #define INNER_N_SMALL_480 2 // ######################### enum GPU_t { NONE, GTX_750, GTX_480 }; GPU_t GPU_TYPE = NONE; float CPU_reduction(float *d_data, const unsigned int n) { float* h_odata = (float *) malloc(n * sizeof(float)); hipMemcpy(h_odata, d_data, n * sizeof(float), hipMemcpyDeviceToHost); float result = 0.f; for (uint i = 0; i < n; i++) { result += h_odata[i]; } free(h_odata); return result; } __device__ float d_final_result = 0.0f; // expects d_data to be array of size n = 2^k __global__ void GPU_reduction(float *d_data, unsigned int n) { const unsigned index = threadIdx.x + blockIdx.x * blockDim.x; d_final_result += d_data[index] + d_data[2 * index]; } struct Atom { float x, y, z; }; struct Float_4 { __device__ inline static constexpr float get(const float4 & data, const int & p) { return p == 0 ? data.x : (p == 1 ? data.y : (p == 2 ? data.z : data.w)); } }; // For given n computes maximal number k which satisfies n % 2^k == 0 __device__ constexpr int divisible_2(int n, int k = 0) { return n % 2 == 0 ? divisible_2(n / 2, k + 1) : k; } // For given n computes maximal number s such as n % s == 0 and s % 2 != 0 __device__ constexpr int factor_2(int n) { return n % 2 == 0 ? 
factor_2(n / 2) : n; } template<unsigned N> __device__ __host__ inline uint getGridSum(int rows) { float GRID_SIZE = ((rows - (rows % N)) * (1 + rows / N)) / 2.f; GRID_SIZE += ceil(rows / (float) N) * (rows % N); return (uint) GRID_SIZE; } //lambda unroller template<int Begin, int End, int Step = 1> struct UnrollerL { template<typename Lambda> __device__ static void step(Lambda& func, const int offset) { func(Begin + offset); UnrollerL<Begin+Step, End, Step>::step(func, offset); } }; //end of lambda unroller template<int End, int Step> struct UnrollerL<End, End, Step> { template<typename Lambda> __device__ static void step(Lambda& func, const int offset) { } }; template<unsigned BLOCK_SIZE, unsigned UNROLL_N, unsigned INNER_N, bool diagonal_block, bool end_block, bool is_big> __device__ inline float loop(const int size, const int i, const int begin, const Atom (&a)[INNER_N], const Atom (&b)[INNER_N], const sMolecule A, const sMolecule B) { float sum = 0.0; auto body = [&] (int j) { auto inner_loop = [&] (int k) { if (not diagonal_block || i + k < begin + j) { float diff_x = A.x[begin + j] - a[k].x; float diff_y = A.y[begin + j] - a[k].y; float diff_z = A.z[begin + j] - a[k].z; float d_sumA = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); diff_x = B.x[begin + j] - b[k].x; diff_y = B.y[begin + j] - b[k].y; diff_z = B.z[begin + j] - b[k].z; float d_sumB = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); sum += pow_2(d_sumA * rsqrtf(d_sumA) - d_sumB * rsqrtf(d_sumB)); } }; if (not diagonal_block || i < begin + j) { // Real index of Atom corresponding to j. UnrollerL<0, INNER_N>::step(inner_loop, 0); } }; auto body2 = [&] (const int j) { const float4 & Ax4 = reinterpret_cast<const float4*>(A.x)[begin / 4 + j]; const float4 & Ay4 = reinterpret_cast<const float4*>(A.y)[begin / 4 + j]; const float4 & Az4 = reinterpret_cast<const float4*>(A.z)[begin / 4 + j]; const float4 & Bx4 = reinterpret_cast<const float4*>(B.x)[begin / 4 + j]; const float4 & By4 = reinterpret_cast<const float4*>(B.y)[begin / 4 + j]; const float4 & Bz4 = reinterpret_cast<const float4*>(B.z)[begin / 4 + j]; // printf("%f and %f\n", A.x[begin], Ax4.x); auto latency_mask = [&] (const int l) { auto inner_loop = [&] (const int k) { if (not diagonal_block || i + k < begin + j * 4 + l) { float diff_x = Float_4::get(Ax4, l) - a[k].x; float diff_y = Float_4::get(Ay4, l) - a[k].y; float diff_z = Float_4::get(Az4, l) - a[k].z; float d_sumA = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); diff_x = Float_4::get(Bx4, l) - b[k].x; diff_y = Float_4::get(By4, l) - b[k].y; diff_z = Float_4::get(Bz4, l) - b[k].z; float d_sumB = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); sum += pow_2(d_sumA * rsqrtf(d_sumA) - d_sumB * rsqrtf(d_sumB)); } }; if (not diagonal_block || i < begin + j * 4 + l) { // Real index of Atom corresponding to j. 
UnrollerL<0, INNER_N>::step(inner_loop, 0); } }; UnrollerL<0, 4>::step(latency_mask, 0); }; if (end_block) { switch (BLOCK_SIZE) { case BLOCK_SIZE_BIG_750: #pragma unroll 64 for (int j = 0; j < size; ++j) { body(j); } break; case BLOCK_SIZE_BIG_480: #pragma unroll 32 for (int j = 0; j < size; ++j) { body(j); } break; default: for (int j = 0; j < size; ++j) { body(j); } } } else { for (unsigned offset = 0; offset < BLOCK_SIZE / 4; offset += UNROLL_N / 4) { UnrollerL<0, UNROLL_N / 4>::step(body2, offset); } } // if (not is_big && diagonal_block) { // return sum / 2.f; // } else { return sum; // } } template <unsigned INNER_N> __device__ inline void getIndexes(uint block_idx, int & _row, int & _col) { block_idx += 1; // indexing is from zero but calculation need it from 1 int lower_row = (sqrt(8.f * block_idx * INNER_N + pow_2(INNER_N)) - INNER_N) / 2.f; // if (block_idx == 2 + 1) // printf("Lower row = %d\n", lower_row); for (int row = lower_row; row <= lower_row + INNER_N; ++row) { uint sum = getGridSum<INNER_N>(row); // if (block_idx == 2 + 1) // printf("block_idx = %d, row = %d, Exact sum = %d\n", block_idx, row, sum); if (sum >= block_idx) { _row = row - 1; // this way block will form lexicographic sort order according to pair (row, col) _col = sum - block_idx; return; } } _row = -1; _col = -1; } template <unsigned BLOCK_SIZE, unsigned UNROLL_N, unsigned INNER_N, bool is_big> __global__ void atoms_difference(const sMolecule A, const sMolecule B, float * d_result, const int n) { float sum = 0.f; __shared__ int row, col; __shared__ bool diagonal_block; __shared__ bool end_block; if (0 == threadIdx.x) { getIndexes<INNER_N>(blockIdx.x, row, col); // if (blockIdx.x == 2) // printf("BlockIdx = %d, Row = %d, Col = %d\n", blockIdx.x, row, col); diagonal_block = (row / INNER_N == col); } __syncthreads(); const int block_begin = col * BLOCK_SIZE * INNER_N; const int i = block_begin + threadIdx.x * INNER_N; const int begin = row * BLOCK_SIZE; #ifdef SHARED_ // printf("Loading Atom: %d\n", begin + threadIdx.x); __shared__ float A_x[BLOCK_SIZE], A_y[BLOCK_SIZE], A_z[BLOCK_SIZE]; A_x[threadIdx.x] = A.x[begin + threadIdx.x]; A_y[threadIdx.x] = A.y[begin + threadIdx.x]; A_z[threadIdx.x] = A.z[begin + threadIdx.x]; __shared__ float B_x[BLOCK_SIZE], B_y[BLOCK_SIZE], B_z[BLOCK_SIZE]; B_x[threadIdx.x] = B.x[begin + threadIdx.x]; B_y[threadIdx.x] = B.y[begin + threadIdx.x]; B_z[threadIdx.x] = B.z[begin + threadIdx.x]; #endif Atom a[INNER_N]; Atom b[INNER_N]; if (i >= n) { goto REDUCTION; } else { auto body = [&] (int j) { // printf("Against atom: %d\n", i + j); a[j].x = A.x[i + j]; a[j].y = A.y[i + j]; a[j].z = A.z[i + j]; b[j].x = B.x[i + j]; b[j].y = B.y[i + j]; b[j].z = B.z[i + j]; }; UnrollerL<0, INNER_N>::step(body, 0); // calculate upper bound __shared__ int size; if (threadIdx.x == 0) { int tmp_size = begin + BLOCK_SIZE - n; // calculate actual size of data block if (tmp_size < 0) { size = BLOCK_SIZE; end_block = false; } else { size = BLOCK_SIZE - tmp_size; end_block = true; } } __syncthreads(); if (true == diagonal_block && true == end_block) { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, true, true, is_big> (size, i, begin, a, b, A, B); } else if (true == diagonal_block && false == end_block) { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, true, false, is_big> (size, i, begin, a, b, A, B); } else if (false == diagonal_block && true == end_block) { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, false, true, is_big> (size, i, begin, a, b, A, B); } else { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, false, false, 
is_big> (size, i, begin, a, b, A, B); } } REDUCTION:; __shared__ float reduction[BLOCK_SIZE]; reduction[threadIdx.x] = sum; int size_red = BLOCK_SIZE; // auto body_reduction = [&] (int i) { // int size = BLOCK_SIZE / (2 << i); // if (threadIdx.x >= size) { // return; // } else { // reduction[threadIdx.x] += reduction[size + threadIdx.x]; // } // __syncthreads(); // }; // __syncthreads(); // UnrollerL<0, divisible_2(BLOCK_SIZE)>::step(body_reduction, 0); __syncthreads(); for (int i = 0; i < divisible_2(BLOCK_SIZE); ++i) { size_red /= 2; if (threadIdx.x >= size_red) { return; } else { reduction[threadIdx.x] += reduction[size_red + threadIdx.x]; } __syncthreads(); } // __syncthreads(); // while (size_red % 2 == 0) { // size_red /= 2; // if (threadIdx.x >= size_red) { // return; // } else { // reduction[threadIdx.x] += reduction[size_red + threadIdx.x]; // } // __syncthreads(); // } if (threadIdx.x == 0) { sum = 0; auto body_add = [&] (int i) { sum += reduction[i]; }; UnrollerL<0, factor_2(BLOCK_SIZE)>::step(body_add, 0); atomicAdd(&d_final_result, sum); } } constexpr bool isBig(const int n) { return n > 2000; } template <unsigned BLOCK_SIZE, unsigned UNROLL_N, unsigned INNER_N, bool is_big> float solveGPU_templated(const sMolecule d_A, const sMolecule d_B, const int n) { int rows = n / BLOCK_SIZE + (n % BLOCK_SIZE == 0 ? 0 : 1); // int cols = rows / INNER_N; int GRID_SIZE = getGridSum<INNER_N>(rows); float *d_result = NULL; float RMSD = 0; hipMemcpyToSymbol(d_final_result, &RMSD, sizeof(RMSD)); // printf("Grid size: %d, rows = %d, cols = %d\n", GRID_SIZE, rows, cols); hipLaunchKernelGGL(( atoms_difference<BLOCK_SIZE, UNROLL_N, INNER_N, is_big>) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_A, d_B, d_result, n); hipMemcpyFromSymbol(&RMSD, d_final_result, sizeof(RMSD)); return sqrt(1 / ((float)n * ((float)n - 1)) * RMSD); } GPU_t getCurrentGPU() { int device; hipGetDevice(&device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); if ("GeForce GTX 750" == std::string(deviceProp.name)) { return GPU_t::GTX_750; } else { return GPU_t::GTX_480; } } float solveGPU(const sMolecule d_A, const sMolecule d_B, const int n) { if (NONE == GPU_TYPE) { GPU_TYPE = getCurrentGPU(); } if (isBig(n)) { if (GPU_t::GTX_750 == GPU_TYPE) { return solveGPU_templated<BLOCK_SIZE_BIG_750, UNROLL_N_BIG_750, INNER_N_BIG_750, true>(d_A, d_B, n); } else { return solveGPU_templated<BLOCK_SIZE_BIG_480, UNROLL_N_BIG_480, INNER_N_BIG_480, true>(d_A, d_B, n); } } else { if (GPU_t::GTX_750 == GPU_TYPE) { return solveGPU_templated<BLOCK_SIZE_SMALL_750, UNROLL_N_SMALL_750, INNER_N_SMALL_750, false>(d_A, d_B, n); } else { return solveGPU_templated<BLOCK_SIZE_SMALL_480, UNROLL_N_SMALL_480, INNER_N_SMALL_480, false>(d_A, d_B, n); } } }
89207695d94217dc701fcdd9c2bfe25b77172bb9.cu
#include <cublas_v2.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include <algorithm> #include <string> // #define SHARED_ #define pow_2(x) ( ((x) * (x)) ) // ####### BLOCK SIZE ###### #define BLOCK_SIZE_BIG_750 512 #define BLOCK_SIZE_BIG_480 256 #define BLOCK_SIZE_SMALL_750 96 #define BLOCK_SIZE_SMALL_480 64 // #define BLOCK_SIZE_BIG_480 10 // #define BLOCK_SIZE_SMALL_750 4 // #define BLOCK_SIZE_SMALL_480 10 // ######################### // ####### UNROLLING ####### #define UNROLL_N_BIG_750 16 #define UNROLL_N_BIG_480 16 // #define UNROLL_N_BIG_750 1 // #define UNROLL_N_BIG_480 1 #define UNROLL_N_SMALL_750 8 #define UNROLL_N_SMALL_480 32 // #define UNROLL_N_SMALL_750 1 // #define UNROLL_N_SMALL_480 1 // ######################### // ####### UNROLLING ####### #define INNER_N_BIG_750 4 #define INNER_N_BIG_480 4 #define INNER_N_SMALL_750 2 #define INNER_N_SMALL_480 2 // ######################### enum GPU_t { NONE, GTX_750, GTX_480 }; GPU_t GPU_TYPE = NONE; float CPU_reduction(float *d_data, const unsigned int n) { float* h_odata = (float *) malloc(n * sizeof(float)); cudaMemcpy(h_odata, d_data, n * sizeof(float), cudaMemcpyDeviceToHost); float result = 0.f; for (uint i = 0; i < n; i++) { result += h_odata[i]; } free(h_odata); return result; } __device__ float d_final_result = 0.0f; // expects d_data to be array of size n = 2^k __global__ void GPU_reduction(float *d_data, unsigned int n) { const unsigned index = threadIdx.x + blockIdx.x * blockDim.x; d_final_result += d_data[index] + d_data[2 * index]; } struct Atom { float x, y, z; }; struct Float_4 { __device__ inline static constexpr float get(const float4 & data, const int & p) { return p == 0 ? data.x : (p == 1 ? data.y : (p == 2 ? data.z : data.w)); } }; // For given n computes maximal number k which satisfies n % 2^k == 0 __device__ constexpr int divisible_2(int n, int k = 0) { return n % 2 == 0 ? divisible_2(n / 2, k + 1) : k; } // For given n computes maximal number s such as n % s == 0 and s % 2 != 0 __device__ constexpr int factor_2(int n) { return n % 2 == 0 ? 
factor_2(n / 2) : n; } template<unsigned N> __device__ __host__ inline uint getGridSum(int rows) { float GRID_SIZE = ((rows - (rows % N)) * (1 + rows / N)) / 2.f; GRID_SIZE += ceil(rows / (float) N) * (rows % N); return (uint) GRID_SIZE; } //lambda unroller template<int Begin, int End, int Step = 1> struct UnrollerL { template<typename Lambda> __device__ static void step(Lambda& func, const int offset) { func(Begin + offset); UnrollerL<Begin+Step, End, Step>::step(func, offset); } }; //end of lambda unroller template<int End, int Step> struct UnrollerL<End, End, Step> { template<typename Lambda> __device__ static void step(Lambda& func, const int offset) { } }; template<unsigned BLOCK_SIZE, unsigned UNROLL_N, unsigned INNER_N, bool diagonal_block, bool end_block, bool is_big> __device__ inline float loop(const int size, const int i, const int begin, const Atom (&a)[INNER_N], const Atom (&b)[INNER_N], const sMolecule A, const sMolecule B) { float sum = 0.0; auto body = [&] (int j) { auto inner_loop = [&] (int k) { if (not diagonal_block || i + k < begin + j) { float diff_x = A.x[begin + j] - a[k].x; float diff_y = A.y[begin + j] - a[k].y; float diff_z = A.z[begin + j] - a[k].z; float d_sumA = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); diff_x = B.x[begin + j] - b[k].x; diff_y = B.y[begin + j] - b[k].y; diff_z = B.z[begin + j] - b[k].z; float d_sumB = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); sum += pow_2(d_sumA * rsqrtf(d_sumA) - d_sumB * rsqrtf(d_sumB)); } }; if (not diagonal_block || i < begin + j) { // Real index of Atom corresponding to j. UnrollerL<0, INNER_N>::step(inner_loop, 0); } }; auto body2 = [&] (const int j) { const float4 & Ax4 = reinterpret_cast<const float4*>(A.x)[begin / 4 + j]; const float4 & Ay4 = reinterpret_cast<const float4*>(A.y)[begin / 4 + j]; const float4 & Az4 = reinterpret_cast<const float4*>(A.z)[begin / 4 + j]; const float4 & Bx4 = reinterpret_cast<const float4*>(B.x)[begin / 4 + j]; const float4 & By4 = reinterpret_cast<const float4*>(B.y)[begin / 4 + j]; const float4 & Bz4 = reinterpret_cast<const float4*>(B.z)[begin / 4 + j]; // printf("%f and %f\n", A.x[begin], Ax4.x); auto latency_mask = [&] (const int l) { auto inner_loop = [&] (const int k) { if (not diagonal_block || i + k < begin + j * 4 + l) { float diff_x = Float_4::get(Ax4, l) - a[k].x; float diff_y = Float_4::get(Ay4, l) - a[k].y; float diff_z = Float_4::get(Az4, l) - a[k].z; float d_sumA = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); diff_x = Float_4::get(Bx4, l) - b[k].x; diff_y = Float_4::get(By4, l) - b[k].y; diff_z = Float_4::get(Bz4, l) - b[k].z; float d_sumB = pow_2(diff_x) + pow_2(diff_y) + pow_2(diff_z); sum += pow_2(d_sumA * rsqrtf(d_sumA) - d_sumB * rsqrtf(d_sumB)); } }; if (not diagonal_block || i < begin + j * 4 + l) { // Real index of Atom corresponding to j. 
UnrollerL<0, INNER_N>::step(inner_loop, 0); } }; UnrollerL<0, 4>::step(latency_mask, 0); }; if (end_block) { switch (BLOCK_SIZE) { case BLOCK_SIZE_BIG_750: #pragma unroll 64 for (int j = 0; j < size; ++j) { body(j); } break; case BLOCK_SIZE_BIG_480: #pragma unroll 32 for (int j = 0; j < size; ++j) { body(j); } break; default: for (int j = 0; j < size; ++j) { body(j); } } } else { for (unsigned offset = 0; offset < BLOCK_SIZE / 4; offset += UNROLL_N / 4) { UnrollerL<0, UNROLL_N / 4>::step(body2, offset); } } // if (not is_big && diagonal_block) { // return sum / 2.f; // } else { return sum; // } } template <unsigned INNER_N> __device__ inline void getIndexes(uint block_idx, int & _row, int & _col) { block_idx += 1; // indexing is from zero but calculation need it from 1 int lower_row = (sqrt(8.f * block_idx * INNER_N + pow_2(INNER_N)) - INNER_N) / 2.f; // if (block_idx == 2 + 1) // printf("Lower row = %d\n", lower_row); for (int row = lower_row; row <= lower_row + INNER_N; ++row) { uint sum = getGridSum<INNER_N>(row); // if (block_idx == 2 + 1) // printf("block_idx = %d, row = %d, Exact sum = %d\n", block_idx, row, sum); if (sum >= block_idx) { _row = row - 1; // this way block will form lexicographic sort order according to pair (row, col) _col = sum - block_idx; return; } } _row = -1; _col = -1; } template <unsigned BLOCK_SIZE, unsigned UNROLL_N, unsigned INNER_N, bool is_big> __global__ void atoms_difference(const sMolecule A, const sMolecule B, float * d_result, const int n) { float sum = 0.f; __shared__ int row, col; __shared__ bool diagonal_block; __shared__ bool end_block; if (0 == threadIdx.x) { getIndexes<INNER_N>(blockIdx.x, row, col); // if (blockIdx.x == 2) // printf("BlockIdx = %d, Row = %d, Col = %d\n", blockIdx.x, row, col); diagonal_block = (row / INNER_N == col); } __syncthreads(); const int block_begin = col * BLOCK_SIZE * INNER_N; const int i = block_begin + threadIdx.x * INNER_N; const int begin = row * BLOCK_SIZE; #ifdef SHARED_ // printf("Loading Atom: %d\n", begin + threadIdx.x); __shared__ float A_x[BLOCK_SIZE], A_y[BLOCK_SIZE], A_z[BLOCK_SIZE]; A_x[threadIdx.x] = A.x[begin + threadIdx.x]; A_y[threadIdx.x] = A.y[begin + threadIdx.x]; A_z[threadIdx.x] = A.z[begin + threadIdx.x]; __shared__ float B_x[BLOCK_SIZE], B_y[BLOCK_SIZE], B_z[BLOCK_SIZE]; B_x[threadIdx.x] = B.x[begin + threadIdx.x]; B_y[threadIdx.x] = B.y[begin + threadIdx.x]; B_z[threadIdx.x] = B.z[begin + threadIdx.x]; #endif Atom a[INNER_N]; Atom b[INNER_N]; if (i >= n) { goto REDUCTION; } else { auto body = [&] (int j) { // printf("Against atom: %d\n", i + j); a[j].x = A.x[i + j]; a[j].y = A.y[i + j]; a[j].z = A.z[i + j]; b[j].x = B.x[i + j]; b[j].y = B.y[i + j]; b[j].z = B.z[i + j]; }; UnrollerL<0, INNER_N>::step(body, 0); // calculate upper bound __shared__ int size; if (threadIdx.x == 0) { int tmp_size = begin + BLOCK_SIZE - n; // calculate actual size of data block if (tmp_size < 0) { size = BLOCK_SIZE; end_block = false; } else { size = BLOCK_SIZE - tmp_size; end_block = true; } } __syncthreads(); if (true == diagonal_block && true == end_block) { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, true, true, is_big> (size, i, begin, a, b, A, B); } else if (true == diagonal_block && false == end_block) { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, true, false, is_big> (size, i, begin, a, b, A, B); } else if (false == diagonal_block && true == end_block) { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, false, true, is_big> (size, i, begin, a, b, A, B); } else { sum = loop<BLOCK_SIZE, UNROLL_N, INNER_N, false, false, 
is_big> (size, i, begin, a, b, A, B); } } REDUCTION:; __shared__ float reduction[BLOCK_SIZE]; reduction[threadIdx.x] = sum; int size_red = BLOCK_SIZE; // auto body_reduction = [&] (int i) { // int size = BLOCK_SIZE / (2 << i); // if (threadIdx.x >= size) { // return; // } else { // reduction[threadIdx.x] += reduction[size + threadIdx.x]; // } // __syncthreads(); // }; // __syncthreads(); // UnrollerL<0, divisible_2(BLOCK_SIZE)>::step(body_reduction, 0); __syncthreads(); for (int i = 0; i < divisible_2(BLOCK_SIZE); ++i) { size_red /= 2; if (threadIdx.x >= size_red) { return; } else { reduction[threadIdx.x] += reduction[size_red + threadIdx.x]; } __syncthreads(); } // __syncthreads(); // while (size_red % 2 == 0) { // size_red /= 2; // if (threadIdx.x >= size_red) { // return; // } else { // reduction[threadIdx.x] += reduction[size_red + threadIdx.x]; // } // __syncthreads(); // } if (threadIdx.x == 0) { sum = 0; auto body_add = [&] (int i) { sum += reduction[i]; }; UnrollerL<0, factor_2(BLOCK_SIZE)>::step(body_add, 0); atomicAdd(&d_final_result, sum); } } constexpr bool isBig(const int n) { return n > 2000; } template <unsigned BLOCK_SIZE, unsigned UNROLL_N, unsigned INNER_N, bool is_big> float solveGPU_templated(const sMolecule d_A, const sMolecule d_B, const int n) { int rows = n / BLOCK_SIZE + (n % BLOCK_SIZE == 0 ? 0 : 1); // int cols = rows / INNER_N; int GRID_SIZE = getGridSum<INNER_N>(rows); float *d_result = NULL; float RMSD = 0; cudaMemcpyToSymbol(d_final_result, &RMSD, sizeof(RMSD)); // printf("Grid size: %d, rows = %d, cols = %d\n", GRID_SIZE, rows, cols); atoms_difference<BLOCK_SIZE, UNROLL_N, INNER_N, is_big> <<<GRID_SIZE, BLOCK_SIZE>>>(d_A, d_B, d_result, n); cudaMemcpyFromSymbol(&RMSD, d_final_result, sizeof(RMSD)); return sqrt(1 / ((float)n * ((float)n - 1)) * RMSD); } GPU_t getCurrentGPU() { int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); if ("GeForce GTX 750" == std::string(deviceProp.name)) { return GPU_t::GTX_750; } else { return GPU_t::GTX_480; } } float solveGPU(const sMolecule d_A, const sMolecule d_B, const int n) { if (NONE == GPU_TYPE) { GPU_TYPE = getCurrentGPU(); } if (isBig(n)) { if (GPU_t::GTX_750 == GPU_TYPE) { return solveGPU_templated<BLOCK_SIZE_BIG_750, UNROLL_N_BIG_750, INNER_N_BIG_750, true>(d_A, d_B, n); } else { return solveGPU_templated<BLOCK_SIZE_BIG_480, UNROLL_N_BIG_480, INNER_N_BIG_480, true>(d_A, d_B, n); } } else { if (GPU_t::GTX_750 == GPU_TYPE) { return solveGPU_templated<BLOCK_SIZE_SMALL_750, UNROLL_N_SMALL_750, INNER_N_SMALL_750, false>(d_A, d_B, n); } else { return solveGPU_templated<BLOCK_SIZE_SMALL_480, UNROLL_N_SMALL_480, INNER_N_SMALL_480, false>(d_A, d_B, n); } } }
de4940f03e889a216708688cf7aa35f9b8e0c2d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu //#include "kernel.hip" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 #define block_limit 1024 #define GPU_COWORKER 1 graph * mygraph; long total_count; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, long int* counter_1, long int* counter_2, index_t* count ) { int p = threadIdx.x/32; long counter1=0; long counter2=0; //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache // local[i]=a[i*m/max_thd]; __syncthreads(); counter1 += 8; //search int j=i; while(j<n){ vertex_t X = b[j]; counter1++; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; /* while(top>bot+1){ __syncthreads(); warp_path[3*p]=0; warp_path[3*p+1]=0; warp_path[3*p+2]=0; __syncthreads(); r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; warp_path[3*p]=1; } if(X<Y){ top = r; warp_path[3*p+1]=1; } if(X>Y){ bot = r; warp_path[3*p+2]=1; } int k=0; if(warp_path[3*p]!=0){ k++; } if(warp_path[3*p+1]!=0){ k++; } if(warp_path[3*p+2]!=0){ k++; } counter2 +=k; } */ //phase 2 // bot = bot*m/max_thd; // top = top*m/max_thd -1; bot = 0; top = m-1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; counter1++; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } counter_1[blockDim.x*blockIdx.x+threadIdx.x]+=counter1; counter_2[blockDim.x*blockIdx.x+threadIdx.x]+=counter2; } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, long int* counter_1, long int* counter_2, index_t* count ) { long counter1=0; long counter2=0; //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache // local[p*32+i]=a[i*m/32]; counter1+=8; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; counter1++; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; /* while(top>bot+1){ __syncthreads(); warp_path[3*p]=0; warp_path[3*p+1]=0; warp_path[3*p+2]=0; __syncthreads(); r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; warp_path[3*p]=1; } if(X<Y){ top = r; warp_path[3*p+1]=1; } if(X>Y){ bot = r; warp_path[3*p+2]=1; } int k=0; if(warp_path[3*p]!=0){ 
k++; } if(warp_path[3*p+1]!=0){ k++; } if(warp_path[3*p+2]!=0){ k++; } counter2 +=k; } */ //phase 2 bot = 0; top = m -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; counter1++; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); counter_1[blockDim.x*blockIdx.x+threadIdx.x]=counter1; counter_2[blockDim.x*blockIdx.x+threadIdx.x]=counter2; } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ mid_offset++; } else { //could be more then 2 catigories large_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; thd_base_large = N1 + N2 + 
large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } else { //could be more then 2 catigories dest_head[large_offset] = head; dest_adj [large_offset] = adj; large_offset++; } } } } __global__ void reduce_kernel_count(index_t* count) { index_t val = 0; for(int i=0; i<max_block*max_block; i++){ val += count[i]; } count[0] = val; } __global__ void reduce_kernel(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; hipSetDevice(GPU_id); H_ERR(hipDeviceSynchronize() ); vertex_t* dev_adj; vertex_t* dev_head; index_t* dev_begin; index_t* dev_count; index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t vert_count = mygraph->vert_count; vertex_t* partAdj = mygraph->partAdj[i]; vertex_t* partHead= mygraph->partHead[i]; // index_t* partDegree = mygraph->partDegree[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(hipMalloc(&dev_degree, vert_count*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) ); H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // H_ERR(hipMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); long counter_1_cpu=0; //long counter_2_cpu=0; long int tmp_counter1,tmp_counter2; long int* counter_1;//counter for memory read long int* counter_2;//counter for divergence H_ERR(hipMalloc(&counter_1, max_thd*max_block*sizeof(long int)) ); H_ERR(hipMalloc(&counter_2, max_thd*max_block*sizeof(long int)) ); double time2=wtime(); for(int j=0; j<PART_NUM; j++){ index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; H_ERR(hipMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMemcpy(src_adj, adj, 
totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( classify_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num ); H_ERR(hipDeviceSynchronize() ); //test for prefix sum hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); index_t N1,N2,N3; H_ERR(hipMemcpy(&N1 , &small_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N3 , &large_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; hipLaunchKernelGGL(( collect_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, dev_adj, dev_begin, 0, N1, counter_1, counter_2, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( block_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, dev_adj, dev_begin, N1, N1+N2, // 0 + GPU_id*256, // totalEdgeCount, counter_1, counter_2, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_kernel) , dim3(1),dim3(1), 0, 0, dev_count); H_ERR(hipDeviceSynchronize() ); H_ERR(hipMemcpy(&count[i], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); thd_count += count[i]; hipLaunchKernelGGL(( reduce_kernel_count) , dim3(1),dim3(1), 0, 0, counter_1); H_ERR(hipDeviceSynchronize() ); // hipLaunchKernelGGL(( reduce_kernel_count) , dim3(1),dim3(1), 0, 0, counter_2); // H_ERR(hipDeviceSynchronize() ); //long int tmp_counter1,tmp_counter2; H_ERR(hipMemcpy(&tmp_counter1, counter_1, sizeof(long), hipMemcpyDeviceToHost)); // H_ERR(hipMemcpy(&tmp_counter2, counter_2, sizeof(long), hipMemcpyDeviceToHost)); counter_1_cpu += tmp_counter1; // counter_2_cpu += tmp_counter2; H_ERR(hipFree(small_num) ); H_ERR(hipFree(large_num) ); 
H_ERR(hipFree(classified_head) ); H_ERR(hipFree(classified_adj) ); H_ERR(hipFree(src_head) ); H_ERR(hipFree(src_adj) ); // H_ERR(hipFree(src_begin) ); cout<<"GPU "<<i<<" part "<<j<<"\n"; } double time4 = wtime(); count[i] = thd_count; cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n"; cout<<"time = "<<time4-time2<<" seconds"<<endl; cout<<"counter for mem_read = "<<counter_1_cpu<<endl; // cout<<"counter for divergence = "<<counter_2_cpu<<endl; total_count += counter_1_cpu; H_ERR(hipFree(dev_adj) ); H_ERR(hipFree(dev_head) ); // H_ERR(hipFree(dev_degree) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(block_offset) ); H_ERR(hipFree(dev_count) ); return NULL; }
de4940f03e889a216708688cf7aa35f9b8e0c2d9.cu
//scan.cu //#include "kernel.cu" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 #define block_limit 1024 #define GPU_COWORKER 1 graph * mygraph; long total_count; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, long int* counter_1, long int* counter_2, index_t* count ) { int p = threadIdx.x/32; long counter1=0; long counter2=0; //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache // local[i]=a[i*m/max_thd]; __syncthreads(); counter1 += 8; //search int j=i; while(j<n){ vertex_t X = b[j]; counter1++; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; /* while(top>bot+1){ __syncthreads(); warp_path[3*p]=0; warp_path[3*p+1]=0; warp_path[3*p+2]=0; __syncthreads(); r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; warp_path[3*p]=1; } if(X<Y){ top = r; warp_path[3*p+1]=1; } if(X>Y){ bot = r; warp_path[3*p+2]=1; } int k=0; if(warp_path[3*p]!=0){ k++; } if(warp_path[3*p+1]!=0){ k++; } if(warp_path[3*p+2]!=0){ k++; } counter2 +=k; } */ //phase 2 // bot = bot*m/max_thd; // top = top*m/max_thd -1; bot = 0; top = m-1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; counter1++; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } counter_1[blockDim.x*blockIdx.x+threadIdx.x]+=counter1; counter_2[blockDim.x*blockIdx.x+threadIdx.x]+=counter2; } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, long int* counter_1, long int* counter_2, index_t* count ) { long counter1=0; long counter2=0; //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache // local[p*32+i]=a[i*m/32]; counter1+=8; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; counter1++; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; /* while(top>bot+1){ __syncthreads(); warp_path[3*p]=0; warp_path[3*p+1]=0; warp_path[3*p+2]=0; __syncthreads(); r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; warp_path[3*p]=1; } if(X<Y){ top = r; warp_path[3*p+1]=1; } if(X>Y){ bot = r; warp_path[3*p+2]=1; } int k=0; if(warp_path[3*p]!=0){ k++; } if(warp_path[3*p+1]!=0){ k++; } if(warp_path[3*p+2]!=0){ k++; } counter2 +=k; } 
*/ //phase 2 bot = 0; top = m -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; counter1++; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); counter_1[blockDim.x*blockIdx.x+threadIdx.x]=counter1; counter_2[blockDim.x*blockIdx.x+threadIdx.x]=counter2; } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ mid_offset++; } else { //could be more then 2 catigories large_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; 
index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } else { //could be more then 2 catigories dest_head[large_offset] = head; dest_adj [large_offset] = adj; large_offset++; } } } } __global__ void reduce_kernel_count(index_t* count) { index_t val = 0; for(int i=0; i<max_block*max_block; i++){ val += count[i]; } count[0] = val; } __global__ void reduce_kernel(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; cudaSetDevice(GPU_id); H_ERR(cudaDeviceSynchronize() ); vertex_t* dev_adj; vertex_t* dev_head; index_t* dev_begin; index_t* dev_count; index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t vert_count = mygraph->vert_count; vertex_t* partAdj = mygraph->partAdj[i]; vertex_t* partHead= mygraph->partHead[i]; // index_t* partDegree = mygraph->partDegree[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(cudaMalloc(&dev_degree, vert_count*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) ); H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // H_ERR(cudaMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); long counter_1_cpu=0; //long counter_2_cpu=0; long int tmp_counter1,tmp_counter2; long int* counter_1;//counter for memory read long int* counter_2;//counter for divergence H_ERR(cudaMalloc(&counter_1, max_thd*max_block*sizeof(long int)) ); H_ERR(cudaMalloc(&counter_2, max_thd*max_block*sizeof(long int)) ); double time2=wtime(); for(int j=0; j<PART_NUM; j++){ index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; H_ERR(cudaMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); 
H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(cudaDeviceSynchronize() ); classify_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num ); H_ERR(cudaDeviceSynchronize() ); //test for prefix sum prefix_kernel_1 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_1 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_1 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); index_t N1,N2,N3; H_ERR(cudaMemcpy(&N1 , &small_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N3 , &large_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; collect_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(cudaDeviceSynchronize() ); warp_binary_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, dev_adj, dev_begin, 0, N1, counter_1, counter_2, dev_count ); H_ERR(cudaDeviceSynchronize() ); block_binary_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, dev_adj, dev_begin, N1, N1+N2, // 0 + GPU_id*256, // totalEdgeCount, counter_1, counter_2, dev_count ); H_ERR(cudaDeviceSynchronize() ); reduce_kernel <<<1,1>>>(dev_count); H_ERR(cudaDeviceSynchronize() ); H_ERR(cudaMemcpy(&count[i], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); thd_count += count[i]; reduce_kernel_count <<<1,1>>>(counter_1); H_ERR(cudaDeviceSynchronize() ); // reduce_kernel_count <<<1,1>>>(counter_2); // H_ERR(cudaDeviceSynchronize() ); //long int tmp_counter1,tmp_counter2; H_ERR(cudaMemcpy(&tmp_counter1, counter_1, sizeof(long), cudaMemcpyDeviceToHost)); // H_ERR(cudaMemcpy(&tmp_counter2, counter_2, sizeof(long), cudaMemcpyDeviceToHost)); counter_1_cpu += tmp_counter1; // counter_2_cpu += tmp_counter2; H_ERR(cudaFree(small_num) ); H_ERR(cudaFree(large_num) ); H_ERR(cudaFree(classified_head) ); H_ERR(cudaFree(classified_adj) ); H_ERR(cudaFree(src_head) ); H_ERR(cudaFree(src_adj) ); // H_ERR(cudaFree(src_begin) ); cout<<"GPU "<<i<<" part "<<j<<"\n"; } double time4 = wtime(); count[i] = thd_count; cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n"; cout<<"time = "<<time4-time2<<" seconds"<<endl; cout<<"counter for mem_read = "<<counter_1_cpu<<endl; // cout<<"counter for divergence = "<<counter_2_cpu<<endl; total_count += counter_1_cpu; H_ERR(cudaFree(dev_adj) ); H_ERR(cudaFree(dev_head) ); // H_ERR(cudaFree(dev_degree) ); H_ERR(cudaFree(dev_begin) ); 
	// counter_1 / counter_2 were allocated above but never released; free them
	// together with the other device buffers before returning.
	H_ERR(cudaFree(counter_1) );
	H_ERR(cudaFree(counter_2) );
	H_ERR(cudaFree(block_offset) );
	H_ERR(cudaFree(dev_count) );
	return NULL;
}
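The reduce_kernel and reduce_kernel_count launched above sum the per-block partial counters with a single thread. A shared-memory tree reduction performs the same aggregation in parallel; the sketch below is illustrative only (the name reduce_counters and the fixed 256-thread block are assumptions, not part of the original source, and it assumes the index_t typedef from this file is in scope) and keeps the same contract of leaving the total in count[0].

// Illustrative alternative to the single-threaded reduce_kernel above: a
// shared-memory tree reduction over the per-block partial counts.
#define REDUCE_THREADS 256  // power of two

__global__ void reduce_counters(index_t* count, int n)
{
	__shared__ index_t sdata[REDUCE_THREADS];
	index_t sum = 0;
	// Each thread strides over the partial counters, so any n is handled.
	for (int i = threadIdx.x; i < n; i += blockDim.x) {
		sum += count[i];
	}
	sdata[threadIdx.x] = sum;
	__syncthreads();
	// Pairwise tree reduction in shared memory.
	for (int s = blockDim.x / 2; s > 0; s >>= 1) {
		if (threadIdx.x < s) {
			sdata[threadIdx.x] += sdata[threadIdx.x + s];
		}
		__syncthreads();
	}
	if (threadIdx.x == 0) {
		count[0] = sdata[0];  // same contract as reduce_kernel: total in count[0]
	}
}

// Usage mirroring reduce_kernel<<<1,1>>>(dev_count):
//   reduce_counters<<<1, REDUCE_THREADS>>>(dev_count, max_block);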
da50c920f441ef9de7bdbc976b47b1e00b406223.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <omp.h> #include <iostream> #include <snmg/utils.cuh> namespace cugraph { namespace snmg { static bool PeerAccessAlreadyEnabled = false; // basic info about the snmg env setup SNMGinfo::SNMGinfo() { int tmp_p, tmp_i; //get info from cuda hipGetDeviceCount(&tmp_p); hipGetDevice(&tmp_i); //get info from omp i = omp_get_thread_num(); p = omp_get_num_threads(); // check that thread_num and num_threads are compatible with the device ID and the number of device if (tmp_i != i) { std::cerr << "Thread ID and GPU ID do not match" << std::endl; } if (p > tmp_p) { std::cerr << "More threads than GPUs" << std::endl; } // number of SM, usefull for kernels paramters hipDeviceGetAttribute(&n_sm, hipDeviceAttributeMultiprocessorCount, i); CUDA_CHECK_LAST(); } SNMGinfo::~SNMGinfo() { } int SNMGinfo::get_thread_num() { return i; } int SNMGinfo::get_num_threads() { return p; } int SNMGinfo::get_num_sm() { return n_sm; } // enable peer access (all to all) void SNMGinfo::setup_peer_access() { if (PeerAccessAlreadyEnabled) return; for (int j = 0; j < p; ++j) { if (i != j) { int canAccessPeer = 0; hipDeviceCanAccessPeer(&canAccessPeer, i, j); CUDA_CHECK_LAST(); if (canAccessPeer) { hipDeviceEnablePeerAccess(j, 0); hipError_t status = hipGetLastError(); if (!(status == hipSuccess || status == hipErrorPeerAccessAlreadyEnabled)) { std::cerr << "Could not Enable Peer Access from" << i << " to " << j << std::endl; } } else { std::cerr << "P2P access required from " << i << " to " << j << std::endl; } } } PeerAccessAlreadyEnabled = true; } void sync_all() { hipDeviceSynchronize(); #pragma omp barrier } void print_mem_usage() { size_t free,total; hipMemGetInfo(&free, &total); std::cout<< std::endl<< "Mem used: "<<total-free<<std::endl; } } } //namespace
da50c920f441ef9de7bdbc976b47b1e00b406223.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <omp.h> #include <iostream> #include <snmg/utils.cuh> namespace cugraph { namespace snmg { static bool PeerAccessAlreadyEnabled = false; // basic info about the snmg env setup SNMGinfo::SNMGinfo() { int tmp_p, tmp_i; //get info from cuda cudaGetDeviceCount(&tmp_p); cudaGetDevice(&tmp_i); //get info from omp i = omp_get_thread_num(); p = omp_get_num_threads(); // check that thread_num and num_threads are compatible with the device ID and the number of device if (tmp_i != i) { std::cerr << "Thread ID and GPU ID do not match" << std::endl; } if (p > tmp_p) { std::cerr << "More threads than GPUs" << std::endl; } // number of SM, usefull for kernels paramters cudaDeviceGetAttribute(&n_sm, cudaDevAttrMultiProcessorCount, i); CUDA_CHECK_LAST(); } SNMGinfo::~SNMGinfo() { } int SNMGinfo::get_thread_num() { return i; } int SNMGinfo::get_num_threads() { return p; } int SNMGinfo::get_num_sm() { return n_sm; } // enable peer access (all to all) void SNMGinfo::setup_peer_access() { if (PeerAccessAlreadyEnabled) return; for (int j = 0; j < p; ++j) { if (i != j) { int canAccessPeer = 0; cudaDeviceCanAccessPeer(&canAccessPeer, i, j); CUDA_CHECK_LAST(); if (canAccessPeer) { cudaDeviceEnablePeerAccess(j, 0); cudaError_t status = cudaGetLastError(); if (!(status == cudaSuccess || status == cudaErrorPeerAccessAlreadyEnabled)) { std::cerr << "Could not Enable Peer Access from" << i << " to " << j << std::endl; } } else { std::cerr << "P2P access required from " << i << " to " << j << std::endl; } } } PeerAccessAlreadyEnabled = true; } void sync_all() { cudaDeviceSynchronize(); #pragma omp barrier } void print_mem_usage() { size_t free,total; cudaMemGetInfo(&free, &total); std::cout<< std::endl<< "Mem used: "<<total-free<<std::endl; } } } //namespace
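SNMGinfo caches the multiprocessor count because, as the comment in the file notes, it is useful for choosing kernel launch parameters. The sketch below shows one common way to turn that attribute into a grid size using the CUDA occupancy API; my_kernel, block_size and launch_saturating are illustrative assumptions, not cugraph API.

// Illustrative only: deriving a grid size from the SM count queried above.
__global__ void my_kernel(int n)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) { /* per-element work */ }
}

void launch_saturating(int dev, int n)
{
  int n_sm = 0, blocks_per_sm = 0;
  const int block_size = 256;
  cudaDeviceGetAttribute(&n_sm, cudaDevAttrMultiProcessorCount, dev);
  // How many resident blocks of this kernel fit on one SM at this block size.
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_sm, my_kernel,
                                                block_size, 0);
  // One wave that fills every SM; cap or scale as the workload requires.
  dim3 grid(n_sm * blocks_per_sm);
  my_kernel<<<grid, block_size>>>(n);
}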
fc2519ab5f5c7c1231c118ab4b782f3e16ce20c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(PadV2, LAYER_PADV2); template <typename T> __global__ void pad_default_kernel_v2(const T* src, T* dst, int count, int input_channel, int output_channel, int pad_c, int output_d, int output_h, int output_w, int input_d, int input_h, int input_w, int pad_d, int pad_h, int pad_w, T value) { CUDA_KERNEL_LOOP(idx, count) { int dst_n = idx / (output_channel * output_d * output_h * output_w); int dst_c = (idx / (output_d * output_h * output_w)) % output_channel; int dst_d = (idx / (output_h * output_w)) % output_d; int dst_h = (idx / output_w) % output_h; int dst_w = idx % output_w; if (dst_c < pad_c || dst_c >= input_channel + pad_c || dst_d < pad_d || dst_d >= input_d + pad_d || dst_h < pad_h || dst_h >= (pad_h + input_h) || dst_w < pad_w || dst_w >= (pad_w + input_w)) { dst[idx] = value; } else { int src_idx = dst_n * input_channel * input_d * input_h * input_w + (dst_c - pad_c) * input_d * input_h * input_w + (dst_d - pad_d) * input_h * input_w + (dst_h - pad_h) * input_w + (dst_w - pad_w); dst[idx] = src[src_idx]; } } } template <typename T> __global__ void pad_reflect_kernel_v2(const T* src, T* dst, int count, int channels, int output_d, int output_h, int output_w, int input_d, int input_h, int input_w, int pad_d, int pad_h, int pad_w) { CUDA_KERNEL_LOOP(idx, count) { int dst_n = idx / (channels * output_d * output_h * output_w); int dst_c = (idx / (output_d * output_h * output_w)) % channels; int dst_d = (idx / (output_h * output_w)) % output_d; int dst_h = (idx / output_w) % output_h; int dst_w = idx % output_w; int d = dst_d >= pad_d? (dst_d < pad_d + input_d? dst_d - pad_d : pad_d - 2 - dst_d + 2 * input_d) : pad_d - dst_d; int h = dst_h >= pad_h? (dst_h < pad_h + input_h? dst_h - pad_h : pad_h - 2 - dst_h + 2 * input_h) : pad_h - dst_h; int w = dst_w >= pad_w? (dst_w < pad_w + input_w? 
dst_w - pad_w : pad_w - 2 - dst_w + 2 * input_w) : pad_w - dst_w; dst[idx] = src[dst_n * channels * input_d * input_h * input_w + dst_c * input_d * input_h * input_w + d * input_h * input_w + h * input_w + w]; } } Status CudaPadV2LayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return CudaLayerAcc::Init(context, param, resource, inputs, outputs); } Status CudaPadV2LayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return TNN_OK; } Status CudaPadV2LayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto params = dynamic_cast<PadLayerParam *>(param_); if (!params) { LOGE("Error: PadV2LayerParam is nil\n"); return Status(TNNERR_MODEL_ERR, "Error: PadV2LayerParam is nil"); } Blob* output_blob = outputs[0]; Blob* input_blob = inputs[0]; auto output_dims = output_blob->GetBlobDesc().dims; auto input_dims = input_blob->GetBlobDesc().dims; int pad_c = 0, pad_d = 0, pad_h = 0, pad_w = 0; int output_c = 1, output_d = 1, output_h = 1, output_w = 1; int input_c = 1, input_d = 1, input_h = 1, input_w = 1; pad_c = params->pads[1]; output_c = output_dims[1]; input_c = input_dims[1]; if(output_dims.size() > 2) { pad_d = params->pads[2]; output_d = output_dims[2]; input_d = input_dims[2]; } if(output_dims.size() > 3) { pad_h = params->pads[3]; output_h = output_dims[3]; input_h = input_dims[3]; } if(output_dims.size() > 4) { pad_w = params->pads[4]; output_w = output_dims[4]; input_w = input_dims[4]; } const int count = DimsVectorUtils::Count(output_blob->GetBlobDesc().dims); float value = params->value; void* input_data = input_blob->GetHandle().base; void* output_data = output_blob->GetHandle().base; if (input_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) { if (params->type == 0) { hipLaunchKernelGGL(( pad_default_kernel_v2), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), static_cast<float*>(input_data), static_cast<float*>(output_data), count, input_c, output_c, pad_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w, value); } else if(params->type == 1) { hipLaunchKernelGGL(( pad_reflect_kernel_v2), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), static_cast<float*>(input_data), static_cast<float*>(output_data), count, output_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w); } else { LOGE("Error: layer acc dont support pad type: %d\n", params->type); return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support pad type"); } } else if (input_blob->GetBlobDesc().data_type == DATA_TYPE_HALF) { if (params->type == 0) { hipLaunchKernelGGL(( pad_default_kernel_v2), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), static_cast<__half*>(input_data), static_cast<__half*>(output_data), count, input_c, output_c, pad_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w, __float2half(value)); } else if(params->type == 1) { hipLaunchKernelGGL(( pad_reflect_kernel_v2), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), static_cast<__half*>(input_data), static_cast<__half*>(output_data), count, output_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w); } else { LOGE("Error: layer acc dont support pad type: %d\n", params->type); return Status(TNNERR_MODEL_ERR, "Error: 
layer acc don't support pad type"); } } else { LOGE("Error: layer acc dont support datatype: %d\n", input_blob->GetBlobDesc().data_type); return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support datatype"); } return TNN_OK; } REGISTER_CUDA_ACC(PadV2, LAYER_PADV2); } // namespace TNN_NS
fc2519ab5f5c7c1231c118ab4b782f3e16ce20c4.cu
// Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(PadV2, LAYER_PADV2); template <typename T> __global__ void pad_default_kernel_v2(const T* src, T* dst, int count, int input_channel, int output_channel, int pad_c, int output_d, int output_h, int output_w, int input_d, int input_h, int input_w, int pad_d, int pad_h, int pad_w, T value) { CUDA_KERNEL_LOOP(idx, count) { int dst_n = idx / (output_channel * output_d * output_h * output_w); int dst_c = (idx / (output_d * output_h * output_w)) % output_channel; int dst_d = (idx / (output_h * output_w)) % output_d; int dst_h = (idx / output_w) % output_h; int dst_w = idx % output_w; if (dst_c < pad_c || dst_c >= input_channel + pad_c || dst_d < pad_d || dst_d >= input_d + pad_d || dst_h < pad_h || dst_h >= (pad_h + input_h) || dst_w < pad_w || dst_w >= (pad_w + input_w)) { dst[idx] = value; } else { int src_idx = dst_n * input_channel * input_d * input_h * input_w + (dst_c - pad_c) * input_d * input_h * input_w + (dst_d - pad_d) * input_h * input_w + (dst_h - pad_h) * input_w + (dst_w - pad_w); dst[idx] = src[src_idx]; } } } template <typename T> __global__ void pad_reflect_kernel_v2(const T* src, T* dst, int count, int channels, int output_d, int output_h, int output_w, int input_d, int input_h, int input_w, int pad_d, int pad_h, int pad_w) { CUDA_KERNEL_LOOP(idx, count) { int dst_n = idx / (channels * output_d * output_h * output_w); int dst_c = (idx / (output_d * output_h * output_w)) % channels; int dst_d = (idx / (output_h * output_w)) % output_d; int dst_h = (idx / output_w) % output_h; int dst_w = idx % output_w; int d = dst_d >= pad_d? (dst_d < pad_d + input_d? dst_d - pad_d : pad_d - 2 - dst_d + 2 * input_d) : pad_d - dst_d; int h = dst_h >= pad_h? (dst_h < pad_h + input_h? dst_h - pad_h : pad_h - 2 - dst_h + 2 * input_h) : pad_h - dst_h; int w = dst_w >= pad_w? (dst_w < pad_w + input_w? 
dst_w - pad_w : pad_w - 2 - dst_w + 2 * input_w) : pad_w - dst_w; dst[idx] = src[dst_n * channels * input_d * input_h * input_w + dst_c * input_d * input_h * input_w + d * input_h * input_w + h * input_w + w]; } } Status CudaPadV2LayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return CudaLayerAcc::Init(context, param, resource, inputs, outputs); } Status CudaPadV2LayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return TNN_OK; } Status CudaPadV2LayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto params = dynamic_cast<PadLayerParam *>(param_); if (!params) { LOGE("Error: PadV2LayerParam is nil\n"); return Status(TNNERR_MODEL_ERR, "Error: PadV2LayerParam is nil"); } Blob* output_blob = outputs[0]; Blob* input_blob = inputs[0]; auto output_dims = output_blob->GetBlobDesc().dims; auto input_dims = input_blob->GetBlobDesc().dims; int pad_c = 0, pad_d = 0, pad_h = 0, pad_w = 0; int output_c = 1, output_d = 1, output_h = 1, output_w = 1; int input_c = 1, input_d = 1, input_h = 1, input_w = 1; pad_c = params->pads[1]; output_c = output_dims[1]; input_c = input_dims[1]; if(output_dims.size() > 2) { pad_d = params->pads[2]; output_d = output_dims[2]; input_d = input_dims[2]; } if(output_dims.size() > 3) { pad_h = params->pads[3]; output_h = output_dims[3]; input_h = input_dims[3]; } if(output_dims.size() > 4) { pad_w = params->pads[4]; output_w = output_dims[4]; input_w = input_dims[4]; } const int count = DimsVectorUtils::Count(output_blob->GetBlobDesc().dims); float value = params->value; void* input_data = input_blob->GetHandle().base; void* output_data = output_blob->GetHandle().base; if (input_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) { if (params->type == 0) { pad_default_kernel_v2<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( static_cast<float*>(input_data), static_cast<float*>(output_data), count, input_c, output_c, pad_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w, value); } else if(params->type == 1) { pad_reflect_kernel_v2<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( static_cast<float*>(input_data), static_cast<float*>(output_data), count, output_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w); } else { LOGE("Error: layer acc dont support pad type: %d\n", params->type); return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support pad type"); } } else if (input_blob->GetBlobDesc().data_type == DATA_TYPE_HALF) { if (params->type == 0) { pad_default_kernel_v2<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( static_cast<__half*>(input_data), static_cast<__half*>(output_data), count, input_c, output_c, pad_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w, __float2half(value)); } else if(params->type == 1) { pad_reflect_kernel_v2<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( static_cast<__half*>(input_data), static_cast<__half*>(output_data), count, output_c, output_d, output_h, output_w, input_d, input_h, input_w, pad_d, pad_h, pad_w); } else { LOGE("Error: layer acc dont support pad type: %d\n", params->type); return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support pad type"); } } else { LOGE("Error: layer acc dont support datatype: %d\n", 
input_blob->GetBlobDesc().data_type); return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support datatype"); } return TNN_OK; } REGISTER_CUDA_ACC(PadV2, LAYER_PADV2); } // namespace TNN_NS
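The reflect branch in pad_reflect_kernel_v2 maps each output coordinate back to an input coordinate by mirroring around the borders without repeating the edge element. A small host-side restatement of that index formula makes the three cases easier to read; reflect_index and the sizes used here are illustrative only, not TNN API.

// Host-side reference for the reflect index used by pad_reflect_kernel_v2.
#include <cstdio>

static int reflect_index(int dst, int pad, int input_size) {
    if (dst < pad)              return pad - dst;                   // left mirror
    if (dst < pad + input_size) return dst - pad;                   // interior copy
    return pad - 2 - dst + 2 * input_size;                          // right mirror
}

int main() {
    // input row of length 4 (indices 0..3), pad = 2 on each side -> output length 8
    const int input_size = 4, pad = 2;
    for (int dst = 0; dst < input_size + 2 * pad; ++dst)
        printf("%d ", reflect_index(dst, pad, input_size));
    printf("\n");  // prints: 2 1 0 1 2 3 2 1  (border element is not repeated)
    return 0;
}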
3363947eece610ad7ddac339db128fdc927dada2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <rocblas.h> #include <hip/hip_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { hipblasHandle_t cublasH = NULL; hipStream_t stream = NULL; const int m = 2; const int n = 2; const int lda = m; /* * A = | 1.0 2.0 | * | 3.0 4.0 | * x = | 5.0 6.0 | */ const std::vector<data_type> A = {1.0, 3.0, 2.0, 4.0}; const std::vector<data_type> x = {5.0, 6.0}; std::vector<data_type> y(m, 0); const data_type alpha = 1.0; const data_type beta = 0.0; const int incx = 1; const int incy = 1; data_type *d_A = nullptr; data_type *d_x = nullptr; data_type *d_y = nullptr; hipblasOperation_t transa = HIPBLAS_OP_N; printf("A\n"); print_matrix(m, n, A.data(), lda); printf("=====\n"); printf("x\n"); print_vector(x.size(), x.data()); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(hipblasCreate(&cublasH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUBLAS_CHECK(hipblasSetStream(cublasH, stream)); /* step 2: copy data to device */ CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_x), sizeof(data_type) * x.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_y), sizeof(data_type) * y.size())); CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_x, x.data(), sizeof(data_type) * x.size(), hipMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK( hipblasDgemv(cublasH, transa, m, n, &alpha, d_A, lda, d_x, incx, &beta, d_y, incy)); /* step 4: copy data to host */ CUDA_CHECK(hipMemcpyAsync(y.data(), d_y, sizeof(data_type) * y.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); /* * y = | 17.00 39.00 | */ printf("y\n"); print_vector(y.size(), y.data()); printf("=====\n"); /* free resources */ CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_x)); CUDA_CHECK(hipFree(d_y)); CUBLAS_CHECK(hipblasDestroy(cublasH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
3363947eece610ad7ddac339db128fdc927dada2.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <cublas_v2.h> #include <cuda_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { cublasHandle_t cublasH = NULL; cudaStream_t stream = NULL; const int m = 2; const int n = 2; const int lda = m; /* * A = | 1.0 2.0 | * | 3.0 4.0 | * x = | 5.0 6.0 | */ const std::vector<data_type> A = {1.0, 3.0, 2.0, 4.0}; const std::vector<data_type> x = {5.0, 6.0}; std::vector<data_type> y(m, 0); const data_type alpha = 1.0; const data_type beta = 0.0; const int incx = 1; const int incy = 1; data_type *d_A = nullptr; data_type *d_x = nullptr; data_type *d_y = nullptr; cublasOperation_t transa = CUBLAS_OP_N; printf("A\n"); print_matrix(m, n, A.data(), lda); printf("=====\n"); printf("x\n"); print_vector(x.size(), x.data()); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(cublasCreate(&cublasH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUBLAS_CHECK(cublasSetStream(cublasH, stream)); /* step 2: copy data to device */ CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_x), sizeof(data_type) * x.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_y), sizeof(data_type) * y.size())); CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_x, x.data(), sizeof(data_type) * x.size(), cudaMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK( cublasDgemv(cublasH, transa, m, n, &alpha, d_A, lda, d_x, incx, &beta, d_y, incy)); /* step 4: copy data to host */ CUDA_CHECK(cudaMemcpyAsync(y.data(), d_y, sizeof(data_type) * y.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); /* * y = | 17.00 39.00 | */ printf("y\n"); print_vector(y.size(), y.data()); printf("=====\n"); /* free resources */ CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_x)); CUDA_CHECK(cudaFree(d_y)); CUBLAS_CHECK(cublasDestroy(cublasH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
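The expected vector y = | 17.00 39.00 | in the sample follows directly from the column-major layout passed to cublasDgemv with CUBLAS_OP_N (y = alpha * A * x + beta * y). The small host-side check below is illustrative only, not part of the sample, and reproduces the same numbers.

// Host-side check of the expected result of the gemv sample above.
// Column-major A with lda = m: element (i, j) is A[i + j * lda].
#include <cstdio>
#include <vector>

int main() {
    const int m = 2, n = 2, lda = m;
    const std::vector<double> A = {1.0, 3.0, 2.0, 4.0};  // | 1 2 ; 3 4 | column-major
    const std::vector<double> x = {5.0, 6.0};
    std::vector<double> y(m, 0.0);
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            y[i] += A[i + j * lda] * x[j];
    printf("%.2f %.2f\n", y[0], y[1]);  // 17.00 39.00, matching the GPU result
    return 0;
}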
21f6d3beca8e0c9ffb7c92e7d4dab5ddc4a0947d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceTensor.cuh> #include <THH/THHDeviceTensorUtils.cuh> #include <THH/THHDeviceUtils.cuh> #include <THH/THHNumerics.cuh> #include <THH/THHTensorTypeUtils.cuh> #include <ATen/hip/HIPContext.h> #define OUTPUT_FEATURES_PER_THREAD 32 #define MAX_WARPS_PER_RUN 4 namespace detail { /// Various utilities for dealing with arrays of values which are /// maintained in thread-local registers. All accesses are done in such /// a way such that the index is statically known, which preserves the /// compiler's ability to allocate the values to registers, as opposed /// to local memory. template <typename T, int N> struct RegisterUtils { /// Register shifting: move elements towards the beginning of the /// array (towards 0) by `Shift` places: /// arr[i] = arr[i + Shift] /// The `Shift` elements at the end are left unchanged. template <int Shift> __device__ __forceinline__ static void shiftLeft(T arr[N]) { // e.g., N = 5, Shift = 2: // 0 1 2 3 4 becomes => // 2 3 4 3 4 (last are unchanged) #pragma unroll for (int i = 0; i < N - Shift; ++i) { arr[i] = arr[i + Shift]; } } }; template <typename T> __device__ __forceinline__ int getDim1Point(const THCDeviceTensor<T, 4>& input) { int threadPoint = blockIdx.x * blockDim.x + threadIdx.x; return threadPoint / input.getSize(3); } template <typename T> __device__ __forceinline__ int getDim2Point(const THCDeviceTensor<T, 4>& input) { int threadPoint = blockIdx.x * blockDim.x + threadIdx.x; return threadPoint % input.getSize(3); } __device__ __forceinline__ int getStartOutputFeature() { return blockIdx.y * OUTPUT_FEATURES_PER_THREAD; } template <typename T> __device__ __forceinline__ int getEndOutputFeature(const THCDeviceTensor<T, 4>& output) { return min((blockIdx.y + 1) * OUTPUT_FEATURES_PER_THREAD, output.getSize(1)); } __device__ __forceinline__ int getBatch() { return blockIdx.z; } // All of these functions that follow are MathOps; they are template // parameters so L2 can be more efficiently implemented // template <typename T> // typedef T (*MathOp)(const T in, const T arg); template <typename T> __device__ __forceinline__ T power2(const T in, const T power) { return THCNumerics<T>::mul(in, in); } template <typename T> __device__ __forceinline__ T root2(const T in, const T power) { return THCNumerics<T>::sqrt(in); } template <typename T> __device__ __forceinline__ T powerGrad2(const T in, const T power) { return in; } template <typename T> __device__ __forceinline__ T powerN(const T in, const T power) { return THCNumerics<T>::pow(in, power); } template <typename T> __device__ __forceinline__ T rootN(const T in, const T power) { const T invPower = THCNumerics<T>::cinv(power); return THCNumerics<T>::pow(in, invPower); } template <typename T> __device__ __forceinline__ T powerGradN(const T in, const T power) { return THCNumerics<T>::pow(in, THCNumerics<T>::sub(power, ScalarConvert<int, T>::to(1))); } // Input is of the form: // [batch][feature dim][optional dim 1][optional dim 2] template <typename T, int Width, int Stride, T (*PowerFunc)(T in, T power), T (*RootFunc)(T in, T power)> __global__ void featureLPPoolingUpdateOutput(const THCDeviceTensor<T, 4> input, THCDeviceTensor<T, 4> output, T power) { // What non-feature points is this thread handling? 
int dim1Point = getDim1Point(input); int dim2Point = getDim2Point(input); if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) { // This thread in the warp is out of bounds return; } // What feature points is this thread handling? int startOutputFeature = getStartOutputFeature(); int endOutputFeature = getEndOutputFeature(output); int startInputFeature = startOutputFeature * Stride; // What batch points is this thread handling? int batch = getBatch(); // If stride >= width, then there is no loaded data reuse. // If stride > 1 and stride < width, then shift by stride, since we // can reuse Width - Stride elements from the previous round. // e.g., width = 5, stride = 2, // output 0 uses input 0 1 2 3 4 // output 1 uses input 2 3 4 5 6 (inputs 2 - 4 are reused, i.e., 5 - // 2 elements are reused, and we have to shift the array by 2) // // e.g., width = 5, stride = 3, // output 0 uses input 0 1 2 3 4 // output 1 uses input 3 4 5 6 7 (inputs 3 - 4 are reused, i.e., 5 - 3 // elements are reused, and we have to shift the array by 3) // Valid only pooling: load Width elements from input (Width - // Stride is handled here, at the top of the loop we handle the // remaining Stride elements). We already verified that the input is // larger than the width. // `in` will contain the input values ^ power. T in[Width]; #pragma unroll for (int i = 0; i < Width - Stride; ++i) { const T data = input[batch][startInputFeature + i][dim1Point][dim2Point]; in[i] = PowerFunc(data, power); } for (int outputFeature = startOutputFeature; outputFeature < endOutputFeature; ++outputFeature) { // If Stride < Width, we're loading Stride new values starting at // Width - Stride // If Stride >= Width, we're loading Width new values starting at 0 if (Stride < Width) { int nextInputFeature = outputFeature * Stride + Width - Stride; #pragma unroll for (int i = 0; i < Stride; ++i) { const T data = input[batch][nextInputFeature + i][dim1Point][dim2Point]; in[Width - Stride + i] = PowerFunc(data, power); } } else { int nextInputFeature = outputFeature * Stride; #pragma unroll for (int i = 0; i < Width; ++i) { T data = input[batch][nextInputFeature + i][dim1Point][dim2Point]; in[i] = PowerFunc(data, power); } } // Calculate the new output feature T val = ScalarConvert<int, T>::to(0); for (int i = 0; i < Width; ++i) { val = THCNumerics<T>::add(val, in[i]); } val = RootFunc(val, power); output[batch][outputFeature][dim1Point][dim2Point] = val; if (Stride < Width) { // Shift registers for calculating the next point RegisterUtils<T, Width>::template shiftLeft<Stride>(in); } } } // forward pass: f(a, ..., z) = (a^p + ... + z^p)^(1 / p) // for bprop: // partial df(a, ... z)/da = a^(p - 1) * (a^p + ... + z^p)^((1 / p) - 1) = // a^(p - 1) * 1/(f(a, ..., z)^(p - 1)) = (a / f(a, ..., z))^(p - 1) // // example: for p = 2, df(a, ..., z)/da = a / f(a, ..., z) // example: for p = 3, df(a, ..., z)/da = (a / f(a, ..., z))^2 // // PowerGradFunc implements x^(p - 1) template <typename T, int Width, int Stride, T (*PowerGradFunc)(T in, T arg)> __global__ void featureLPPoolingUpdateGradInput(const THCDeviceTensor<T, 4> gradOutput, const THCDeviceTensor<T, 4> input, const THCDeviceTensor<T, 4> output, THCDeviceTensor<T, 4> gradInput, T power) { // What non-feature points is this thread handling? int dim1Point = getDim1Point(input); int dim2Point = getDim2Point(input); if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) { // This thread in the warp is out of bounds return; } // What feature points is this thread handling? 
[start, end) int startOutputFeature = getStartOutputFeature(); int endOutputFeature = getEndOutputFeature(output); // What is the first input point that the output features depend // upon? [start, end) int startInputFeature = startOutputFeature * Stride; int endInputFeature = endOutputFeature * Stride; // What batch points is this thread handling? int batch = getBatch(); // atomicAdd into gradInput is slow, avoid it where possible. // We can do this because there is a range of gradInput elements // that we are updating exclusively. This is how we find it // // width = 3 stride = 1 example: // ------------------------------ // startOutputFeature for this thread // | // | // previous thread's output feature // | | // | | gradOutput // __v____v___________________ // | | | | | | // --------------------------- // |\ \_____ // | \__ \ gradInput // __v____v____v_____________ // | | | | | | // --------------------------- // A A // | | // startInputFeature // | // exclusiveStartInputFeature // // exclusiveStartInputFeature is the first input feature that we can // write into exclusively; the one right before it overlaps with // updates from a previous thread and thus has to use atomicAdd. int exclusiveStartInputFeature = startInputFeature == 0 ? // no thread is before ourselves 0 : // there is a thread before ourselves startInputFeature + (Width - 1) * Stride; // Similarly, exclusiveEndInputFeature is the last input feature // that we can write into exclusively, since we might be overlapping // with the following thread int exclusiveEndInputFeature = endOutputFeature == output.getSize(1) ? // no thread is after ourselves endInputFeature + (Width - 1) * Stride : // there is a thread after ourselves endInputFeature; // As with updateOutput preload input elements, except no need to // transform them T in[Width]; #pragma unroll for (int i = 0; i < Width - Stride; ++i) { in[i] = input[batch][startInputFeature + i][dim1Point][dim2Point]; } for (int outputFeature = startOutputFeature; outputFeature < endOutputFeature; ++outputFeature) { // As with updateOutput load the subsequent input elements that we // need, except no need to transform them // // If Stride < Width, we're loading Stride new values starting at // Width - Stride // If Stride >= Width, we're loading Width new values starting at 0 if (Stride < Width) { int nextInputFeature = outputFeature * Stride + Width - Stride; #pragma unroll for (int i = 0; i < Stride; ++i) { in[Width - Stride + i] = input[batch][nextInputFeature + i][dim1Point][dim2Point]; } } else { int nextInputFeature = outputFeature * Stride; #pragma unroll for (int i = 0; i < Width; ++i) { in[i] = input[batch][nextInputFeature + i][dim1Point][dim2Point]; } } // A given output feature gradient contributes to `Width` input // gradients const T gradOut = gradOutput[batch][outputFeature][dim1Point][dim2Point]; // Load output (f(x_is)). It is possible that this is zero, in // which case we'll ignore this point. 
T out = output[batch][outputFeature][dim1Point][dim2Point]; if (THCNumerics<T>::eq(out, ScalarConvert<int, T>::to(0))) { continue; } int curStartInputFeature = outputFeature * Stride; int curEndInputFeature = outputFeature * Stride + Width - 1; if (curStartInputFeature >= exclusiveStartInputFeature && curEndInputFeature < exclusiveEndInputFeature) { // This thread is exclusively responsible for updating these // input points, so we need not make the addition atomic for (int i = 0; i < Width; ++i) { int inputFeature = outputFeature * Stride + i; // Calculate grad * (x_i / f(x_is))^(p - 1) const T val = THCNumerics<T>::mul( gradOut, PowerGradFunc(THCNumerics<T>::div(in[i], out), power)); gradInput[batch][inputFeature][dim1Point][dim2Point] = THCNumerics<T>::add( gradInput[batch][inputFeature][dim1Point][dim2Point], val); } } else { // Handle start and end boundary cases: potential overlap with // other threads for (int i = 0; i < Width; ++i) { int inputFeature = outputFeature * Stride + i; // Calculate grad * (x_i / f(x_is))^(p - 1) T val = THCNumerics<T>::mul( gradOut, PowerGradFunc(THCNumerics<T>::div(in[i], out), power)); // We don't overlap other threads for this range if (inputFeature >= exclusiveStartInputFeature && inputFeature < exclusiveEndInputFeature) { gradInput[batch][inputFeature][dim1Point][dim2Point] = THCNumerics<T>::add( gradInput[batch][inputFeature][dim1Point][dim2Point], val); } else { // We are potentially overlapping with threads handling // features before ourselves, so these need to be added atomically atomicAdd(&gradInput[batch][inputFeature][dim1Point][dim2Point], val); } } } if (Stride < Width) { // Shift registers for calculating the next point RegisterUtils<T, Width>::template shiftLeft<Stride>(in); } } } } // namespace detail inline int lpPoolingOutputSize(int inputSize, int width, int stride) { return ((inputSize - width) / stride) + 1; } template <typename T> bool runFeatureLPPoolingUpdateOutput(THCState* state, const THCDeviceTensor<T, 4>& input, THCDeviceTensor<T, 4>& output, float power, int width, int stride) { hipStream_t stream = THCState_getCurrentStream(state); const hipDeviceProp_t* deviceProperties = at::cuda::getCurrentDeviceProperties(); int outputFeatures = ((input.getSize(1) - width) / stride) + 1; THAssert(input.getSize(0) == output.getSize(0)); THAssert(outputFeatures == output.getSize(1)); THAssert(input.getSize(1) >= width); THAssert(input.getSize(2) == output.getSize(2)); THAssert(input.getSize(3) == output.getSize(3)); THAssert(power > 0.0f); THAssert(width >= 1); THAssert(stride >= 1); // Split non-features among threads and grid x int totalNonFeatureSize = input.getSize(2) * input.getSize(3); int numWarps = min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize), MAX_WARPS_PER_RUN); int blockSize = deviceProperties->warpSize * numWarps; // Split non-features among grid x int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize); // Split features among grid y, up to a maximum number of features per thread int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD); // Split batch among grid z. 
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0)); dim3 block(blockSize); #define L2_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ hipLaunchKernelGGL(( detail::featureLPPoolingUpdateOutput<T, WIDTH, \ STRIDE, \ detail::power2, \ detail::root2>), dim3(grid), dim3(block), 0, stream, \ input, output, \ ScalarConvert<float, T>::to(power)); \ return true; #define L2_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ L2_STRIDE_CASE(1, WIDTH); \ L2_STRIDE_CASE(2, WIDTH); \ L2_STRIDE_CASE(3, WIDTH); \ L2_STRIDE_CASE(4, WIDTH); \ } \ break; #define LP_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ hipLaunchKernelGGL(( detail::featureLPPoolingUpdateOutput<T, WIDTH, \ STRIDE, \ detail::powerN, \ detail::rootN>), dim3(grid), dim3(block), 0, stream, \ input, output, \ ScalarConvert<float, T>::to(power)); \ return true; #define LP_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ LP_STRIDE_CASE(1, WIDTH); \ LP_STRIDE_CASE(2, WIDTH); \ LP_STRIDE_CASE(3, WIDTH); \ LP_STRIDE_CASE(4, WIDTH); \ } \ break; if (power == 2.0f) { switch (width) { L2_WIDTH_CASE(2); L2_WIDTH_CASE(3); L2_WIDTH_CASE(4); L2_WIDTH_CASE(5); L2_WIDTH_CASE(6); L2_WIDTH_CASE(7); L2_WIDTH_CASE(8); L2_WIDTH_CASE(9); L2_WIDTH_CASE(10); L2_WIDTH_CASE(11); L2_WIDTH_CASE(12); L2_WIDTH_CASE(13); L2_WIDTH_CASE(14); L2_WIDTH_CASE(15); L2_WIDTH_CASE(16); } } else { switch (width) { LP_WIDTH_CASE(2); LP_WIDTH_CASE(3); LP_WIDTH_CASE(4); LP_WIDTH_CASE(5); LP_WIDTH_CASE(6); LP_WIDTH_CASE(7); LP_WIDTH_CASE(8); LP_WIDTH_CASE(9); LP_WIDTH_CASE(10); LP_WIDTH_CASE(11); LP_WIDTH_CASE(12); LP_WIDTH_CASE(13); LP_WIDTH_CASE(14); LP_WIDTH_CASE(15); LP_WIDTH_CASE(16); } } // Otherwise, we have an unhandled width and/or stride. return false; #undef L2_STRIDE_CASE #undef L2_WIDTH_CASE #undef LP_STRIDE_CASE #undef LP_WIDTH_CASE } template <typename T> bool runFeatureLPPoolingUpdateGradInput(THCState* state, const THCDeviceTensor<T, 4>& gradOutput, const THCDeviceTensor<T, 4>& input, const THCDeviceTensor<T, 4>& output, THCDeviceTensor<T, 4>& gradInput, float power, int width, int stride) { hipStream_t stream = THCState_getCurrentStream(state); const hipDeviceProp_t* deviceProperties = at::cuda::getCurrentDeviceProperties(); for (int i = 0; i < 4; ++i) { THAssert(gradOutput.getSize(i) == output.getSize(i)); THAssert(gradInput.getSize(i) == input.getSize(i)); } int outputFeatures = ((input.getSize(1) - width) / stride) + 1; THAssert(gradInput.getSize(0) == gradOutput.getSize(0)); THAssert(outputFeatures == gradOutput.getSize(1)); THAssert(gradInput.getSize(1) >= width); THAssert(gradInput.getSize(2) == gradOutput.getSize(2)); THAssert(gradInput.getSize(3) == gradOutput.getSize(3)); THAssert(power > 0.0f); THAssert(width >= 1); THAssert(stride >= 1); // Different threads are potentially adding into overlapping input // points, so we must clear out gradInput before continuing. gradInput.zero(stream); // Split non-features among threads and grid x int totalNonFeatureSize = input.getSize(2) * input.getSize(3); int numWarps = min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize), MAX_WARPS_PER_RUN); int blockSize = deviceProperties->warpSize * numWarps; // Split non-features among grid x int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize); // Split features among grid y, up to a maximum number of features per thread int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD); // Split batch among grid z. 
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0)); dim3 block(blockSize); #define L2_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ hipLaunchKernelGGL(( detail::featureLPPoolingUpdateGradInput< \ T, WIDTH, STRIDE, detail::powerGrad2>), dim3(grid), dim3(block), 0, stream, \ gradOutput, input, output, gradInput, \ ScalarConvert<float, T>::to(power)); \ return true; #define L2_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ L2_STRIDE_CASE(1, WIDTH); \ L2_STRIDE_CASE(2, WIDTH); \ L2_STRIDE_CASE(3, WIDTH); \ L2_STRIDE_CASE(4, WIDTH); \ } \ break; #define LP_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ hipLaunchKernelGGL(( detail::featureLPPoolingUpdateGradInput< \ T, WIDTH, STRIDE, detail::powerGradN>), dim3(grid), dim3(block), 0, stream, \ gradOutput, input, output, gradInput, \ ScalarConvert<float, T>::to(power)); \ return true; #define LP_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ LP_STRIDE_CASE(1, WIDTH); \ LP_STRIDE_CASE(2, WIDTH); \ LP_STRIDE_CASE(3, WIDTH); \ LP_STRIDE_CASE(4, WIDTH); \ } \ break; if (power == 2.0f) { switch (width) { L2_WIDTH_CASE(2); L2_WIDTH_CASE(3); L2_WIDTH_CASE(4); L2_WIDTH_CASE(5); L2_WIDTH_CASE(6); L2_WIDTH_CASE(7); L2_WIDTH_CASE(8); L2_WIDTH_CASE(9); L2_WIDTH_CASE(10); L2_WIDTH_CASE(11); L2_WIDTH_CASE(12); L2_WIDTH_CASE(13); L2_WIDTH_CASE(14); L2_WIDTH_CASE(15); L2_WIDTH_CASE(16); } } else { switch (width) { LP_WIDTH_CASE(2); LP_WIDTH_CASE(3); LP_WIDTH_CASE(4); LP_WIDTH_CASE(5); LP_WIDTH_CASE(6); LP_WIDTH_CASE(7); LP_WIDTH_CASE(8); LP_WIDTH_CASE(9); LP_WIDTH_CASE(10); LP_WIDTH_CASE(11); LP_WIDTH_CASE(12); LP_WIDTH_CASE(13); LP_WIDTH_CASE(14); LP_WIDTH_CASE(15); LP_WIDTH_CASE(16); } } // Otherwise, we have an unhandled width and/or stride. return false; #undef L2_STRIDE_CASE #undef L2_WIDTH_CASE #undef LP_STRIDE_CASE #undef LP_WIDTH_CASE } #include <THHUNN/generic/FeatureLPPooling.hip> #include <THH/THHGenerateFloatTypes.h>
21f6d3beca8e0c9ffb7c92e7d4dab5ddc4a0947d.cu
#include <THCUNN/THCUNN.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceTensor.cuh> #include <THC/THCDeviceTensorUtils.cuh> #include <THC/THCDeviceUtils.cuh> #include <THC/THCNumerics.cuh> #include <THC/THCTensorTypeUtils.cuh> #include <ATen/cuda/CUDAContext.h> #define OUTPUT_FEATURES_PER_THREAD 32 #define MAX_WARPS_PER_RUN 4 namespace detail { /// Various utilities for dealing with arrays of values which are /// maintained in thread-local registers. All accesses are done in such /// a way such that the index is statically known, which preserves the /// compiler's ability to allocate the values to registers, as opposed /// to local memory. template <typename T, int N> struct RegisterUtils { /// Register shifting: move elements towards the beginning of the /// array (towards 0) by `Shift` places: /// arr[i] = arr[i + Shift] /// The `Shift` elements at the end are left unchanged. template <int Shift> __device__ __forceinline__ static void shiftLeft(T arr[N]) { // e.g., N = 5, Shift = 2: // 0 1 2 3 4 becomes => // 2 3 4 3 4 (last are unchanged) #pragma unroll for (int i = 0; i < N - Shift; ++i) { arr[i] = arr[i + Shift]; } } }; template <typename T> __device__ __forceinline__ int getDim1Point(const THCDeviceTensor<T, 4>& input) { int threadPoint = blockIdx.x * blockDim.x + threadIdx.x; return threadPoint / input.getSize(3); } template <typename T> __device__ __forceinline__ int getDim2Point(const THCDeviceTensor<T, 4>& input) { int threadPoint = blockIdx.x * blockDim.x + threadIdx.x; return threadPoint % input.getSize(3); } __device__ __forceinline__ int getStartOutputFeature() { return blockIdx.y * OUTPUT_FEATURES_PER_THREAD; } template <typename T> __device__ __forceinline__ int getEndOutputFeature(const THCDeviceTensor<T, 4>& output) { return min((blockIdx.y + 1) * OUTPUT_FEATURES_PER_THREAD, output.getSize(1)); } __device__ __forceinline__ int getBatch() { return blockIdx.z; } // All of these functions that follow are MathOps; they are template // parameters so L2 can be more efficiently implemented // template <typename T> // typedef T (*MathOp)(const T in, const T arg); template <typename T> __device__ __forceinline__ T power2(const T in, const T power) { return THCNumerics<T>::mul(in, in); } template <typename T> __device__ __forceinline__ T root2(const T in, const T power) { return THCNumerics<T>::sqrt(in); } template <typename T> __device__ __forceinline__ T powerGrad2(const T in, const T power) { return in; } template <typename T> __device__ __forceinline__ T powerN(const T in, const T power) { return THCNumerics<T>::pow(in, power); } template <typename T> __device__ __forceinline__ T rootN(const T in, const T power) { const T invPower = THCNumerics<T>::cinv(power); return THCNumerics<T>::pow(in, invPower); } template <typename T> __device__ __forceinline__ T powerGradN(const T in, const T power) { return THCNumerics<T>::pow(in, THCNumerics<T>::sub(power, ScalarConvert<int, T>::to(1))); } // Input is of the form: // [batch][feature dim][optional dim 1][optional dim 2] template <typename T, int Width, int Stride, T (*PowerFunc)(T in, T power), T (*RootFunc)(T in, T power)> __global__ void featureLPPoolingUpdateOutput(const THCDeviceTensor<T, 4> input, THCDeviceTensor<T, 4> output, T power) { // What non-feature points is this thread handling? 
int dim1Point = getDim1Point(input); int dim2Point = getDim2Point(input); if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) { // This thread in the warp is out of bounds return; } // What feature points is this thread handling? int startOutputFeature = getStartOutputFeature(); int endOutputFeature = getEndOutputFeature(output); int startInputFeature = startOutputFeature * Stride; // What batch points is this thread handling? int batch = getBatch(); // If stride >= width, then there is no loaded data reuse. // If stride > 1 and stride < width, then shift by stride, since we // can reuse Width - Stride elements from the previous round. // e.g., width = 5, stride = 2, // output 0 uses input 0 1 2 3 4 // output 1 uses input 2 3 4 5 6 (inputs 2 - 4 are reused, i.e., 5 - // 2 elements are reused, and we have to shift the array by 2) // // e.g., width = 5, stride = 3, // output 0 uses input 0 1 2 3 4 // output 1 uses input 3 4 5 6 7 (inputs 3 - 4 are reused, i.e., 5 - 3 // elements are reused, and we have to shift the array by 3) // Valid only pooling: load Width elements from input (Width - // Stride is handled here, at the top of the loop we handle the // remaining Stride elements). We already verified that the input is // larger than the width. // `in` will contain the input values ^ power. T in[Width]; #pragma unroll for (int i = 0; i < Width - Stride; ++i) { const T data = input[batch][startInputFeature + i][dim1Point][dim2Point]; in[i] = PowerFunc(data, power); } for (int outputFeature = startOutputFeature; outputFeature < endOutputFeature; ++outputFeature) { // If Stride < Width, we're loading Stride new values starting at // Width - Stride // If Stride >= Width, we're loading Width new values starting at 0 if (Stride < Width) { int nextInputFeature = outputFeature * Stride + Width - Stride; #pragma unroll for (int i = 0; i < Stride; ++i) { const T data = input[batch][nextInputFeature + i][dim1Point][dim2Point]; in[Width - Stride + i] = PowerFunc(data, power); } } else { int nextInputFeature = outputFeature * Stride; #pragma unroll for (int i = 0; i < Width; ++i) { T data = input[batch][nextInputFeature + i][dim1Point][dim2Point]; in[i] = PowerFunc(data, power); } } // Calculate the new output feature T val = ScalarConvert<int, T>::to(0); for (int i = 0; i < Width; ++i) { val = THCNumerics<T>::add(val, in[i]); } val = RootFunc(val, power); output[batch][outputFeature][dim1Point][dim2Point] = val; if (Stride < Width) { // Shift registers for calculating the next point RegisterUtils<T, Width>::template shiftLeft<Stride>(in); } } } // forward pass: f(a, ..., z) = (a^p + ... + z^p)^(1 / p) // for bprop: // partial df(a, ... z)/da = a^(p - 1) * (a^p + ... + z^p)^((1 / p) - 1) = // a^(p - 1) * 1/(f(a, ..., z)^(p - 1)) = (a / f(a, ..., z))^(p - 1) // // example: for p = 2, df(a, ..., z)/da = a / f(a, ..., z) // example: for p = 3, df(a, ..., z)/da = (a / f(a, ..., z))^2 // // PowerGradFunc implements x^(p - 1) template <typename T, int Width, int Stride, T (*PowerGradFunc)(T in, T arg)> __global__ void featureLPPoolingUpdateGradInput(const THCDeviceTensor<T, 4> gradOutput, const THCDeviceTensor<T, 4> input, const THCDeviceTensor<T, 4> output, THCDeviceTensor<T, 4> gradInput, T power) { // What non-feature points is this thread handling? int dim1Point = getDim1Point(input); int dim2Point = getDim2Point(input); if (dim1Point >= input.getSize(2) || dim2Point >= input.getSize(3)) { // This thread in the warp is out of bounds return; } // What feature points is this thread handling? 
[start, end) int startOutputFeature = getStartOutputFeature(); int endOutputFeature = getEndOutputFeature(output); // What is the first input point that the output features depend // upon? [start, end) int startInputFeature = startOutputFeature * Stride; int endInputFeature = endOutputFeature * Stride; // What batch points is this thread handling? int batch = getBatch(); // atomicAdd into gradInput is slow, avoid it where possible. // We can do this because there is a range of gradInput elements // that we are updating exclusively. This is how we find it // // width = 3 stride = 1 example: // ------------------------------ // startOutputFeature for this thread // | // | // previous thread's output feature // | | // | | gradOutput // __v____v___________________ // | | | | | | // --------------------------- // |\ \_____ // | \__ \ gradInput // __v____v____v_____________ // | | | | | | // --------------------------- // A A // | | // startInputFeature // | // exclusiveStartInputFeature // // exclusiveStartInputFeature is the first input feature that we can // write into exclusively; the one right before it overlaps with // updates from a previous thread and thus has to use atomicAdd. int exclusiveStartInputFeature = startInputFeature == 0 ? // no thread is before ourselves 0 : // there is a thread before ourselves startInputFeature + (Width - 1) * Stride; // Similarly, exclusiveEndInputFeature is the last input feature // that we can write into exclusively, since we might be overlapping // with the following thread int exclusiveEndInputFeature = endOutputFeature == output.getSize(1) ? // no thread is after ourselves endInputFeature + (Width - 1) * Stride : // there is a thread after ourselves endInputFeature; // As with updateOutput preload input elements, except no need to // transform them T in[Width]; #pragma unroll for (int i = 0; i < Width - Stride; ++i) { in[i] = input[batch][startInputFeature + i][dim1Point][dim2Point]; } for (int outputFeature = startOutputFeature; outputFeature < endOutputFeature; ++outputFeature) { // As with updateOutput load the subsequent input elements that we // need, except no need to transform them // // If Stride < Width, we're loading Stride new values starting at // Width - Stride // If Stride >= Width, we're loading Width new values starting at 0 if (Stride < Width) { int nextInputFeature = outputFeature * Stride + Width - Stride; #pragma unroll for (int i = 0; i < Stride; ++i) { in[Width - Stride + i] = input[batch][nextInputFeature + i][dim1Point][dim2Point]; } } else { int nextInputFeature = outputFeature * Stride; #pragma unroll for (int i = 0; i < Width; ++i) { in[i] = input[batch][nextInputFeature + i][dim1Point][dim2Point]; } } // A given output feature gradient contributes to `Width` input // gradients const T gradOut = gradOutput[batch][outputFeature][dim1Point][dim2Point]; // Load output (f(x_is)). It is possible that this is zero, in // which case we'll ignore this point. 
T out = output[batch][outputFeature][dim1Point][dim2Point]; if (THCNumerics<T>::eq(out, ScalarConvert<int, T>::to(0))) { continue; } int curStartInputFeature = outputFeature * Stride; int curEndInputFeature = outputFeature * Stride + Width - 1; if (curStartInputFeature >= exclusiveStartInputFeature && curEndInputFeature < exclusiveEndInputFeature) { // This thread is exclusively responsible for updating these // input points, so we need not make the addition atomic for (int i = 0; i < Width; ++i) { int inputFeature = outputFeature * Stride + i; // Calculate grad * (x_i / f(x_is))^(p - 1) const T val = THCNumerics<T>::mul( gradOut, PowerGradFunc(THCNumerics<T>::div(in[i], out), power)); gradInput[batch][inputFeature][dim1Point][dim2Point] = THCNumerics<T>::add( gradInput[batch][inputFeature][dim1Point][dim2Point], val); } } else { // Handle start and end boundary cases: potential overlap with // other threads for (int i = 0; i < Width; ++i) { int inputFeature = outputFeature * Stride + i; // Calculate grad * (x_i / f(x_is))^(p - 1) T val = THCNumerics<T>::mul( gradOut, PowerGradFunc(THCNumerics<T>::div(in[i], out), power)); // We don't overlap other threads for this range if (inputFeature >= exclusiveStartInputFeature && inputFeature < exclusiveEndInputFeature) { gradInput[batch][inputFeature][dim1Point][dim2Point] = THCNumerics<T>::add( gradInput[batch][inputFeature][dim1Point][dim2Point], val); } else { // We are potentially overlapping with threads handling // features before ourselves, so these need to be added atomically atomicAdd(&gradInput[batch][inputFeature][dim1Point][dim2Point], val); } } } if (Stride < Width) { // Shift registers for calculating the next point RegisterUtils<T, Width>::template shiftLeft<Stride>(in); } } } } // namespace detail inline int lpPoolingOutputSize(int inputSize, int width, int stride) { return ((inputSize - width) / stride) + 1; } template <typename T> bool runFeatureLPPoolingUpdateOutput(THCState* state, const THCDeviceTensor<T, 4>& input, THCDeviceTensor<T, 4>& output, float power, int width, int stride) { cudaStream_t stream = THCState_getCurrentStream(state); const cudaDeviceProp* deviceProperties = at::cuda::getCurrentDeviceProperties(); int outputFeatures = ((input.getSize(1) - width) / stride) + 1; THAssert(input.getSize(0) == output.getSize(0)); THAssert(outputFeatures == output.getSize(1)); THAssert(input.getSize(1) >= width); THAssert(input.getSize(2) == output.getSize(2)); THAssert(input.getSize(3) == output.getSize(3)); THAssert(power > 0.0f); THAssert(width >= 1); THAssert(stride >= 1); // Split non-features among threads and grid x int totalNonFeatureSize = input.getSize(2) * input.getSize(3); int numWarps = min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize), MAX_WARPS_PER_RUN); int blockSize = deviceProperties->warpSize * numWarps; // Split non-features among grid x int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize); // Split features among grid y, up to a maximum number of features per thread int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD); // Split batch among grid z. 
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0)); dim3 block(blockSize); #define L2_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ detail:: \ featureLPPoolingUpdateOutput<T, WIDTH, \ STRIDE, \ detail::power2, \ detail::root2><<<grid, block, 0, stream>>>( \ input, output, \ ScalarConvert<float, T>::to(power)); \ return true; #define L2_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ L2_STRIDE_CASE(1, WIDTH); \ L2_STRIDE_CASE(2, WIDTH); \ L2_STRIDE_CASE(3, WIDTH); \ L2_STRIDE_CASE(4, WIDTH); \ } \ break; #define LP_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ detail:: \ featureLPPoolingUpdateOutput<T, WIDTH, \ STRIDE, \ detail::powerN, \ detail::rootN><<<grid, block, 0, stream>>>( \ input, output, \ ScalarConvert<float, T>::to(power)); \ return true; #define LP_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ LP_STRIDE_CASE(1, WIDTH); \ LP_STRIDE_CASE(2, WIDTH); \ LP_STRIDE_CASE(3, WIDTH); \ LP_STRIDE_CASE(4, WIDTH); \ } \ break; if (power == 2.0f) { switch (width) { L2_WIDTH_CASE(2); L2_WIDTH_CASE(3); L2_WIDTH_CASE(4); L2_WIDTH_CASE(5); L2_WIDTH_CASE(6); L2_WIDTH_CASE(7); L2_WIDTH_CASE(8); L2_WIDTH_CASE(9); L2_WIDTH_CASE(10); L2_WIDTH_CASE(11); L2_WIDTH_CASE(12); L2_WIDTH_CASE(13); L2_WIDTH_CASE(14); L2_WIDTH_CASE(15); L2_WIDTH_CASE(16); } } else { switch (width) { LP_WIDTH_CASE(2); LP_WIDTH_CASE(3); LP_WIDTH_CASE(4); LP_WIDTH_CASE(5); LP_WIDTH_CASE(6); LP_WIDTH_CASE(7); LP_WIDTH_CASE(8); LP_WIDTH_CASE(9); LP_WIDTH_CASE(10); LP_WIDTH_CASE(11); LP_WIDTH_CASE(12); LP_WIDTH_CASE(13); LP_WIDTH_CASE(14); LP_WIDTH_CASE(15); LP_WIDTH_CASE(16); } } // Otherwise, we have an unhandled width and/or stride. return false; #undef L2_STRIDE_CASE #undef L2_WIDTH_CASE #undef LP_STRIDE_CASE #undef LP_WIDTH_CASE } template <typename T> bool runFeatureLPPoolingUpdateGradInput(THCState* state, const THCDeviceTensor<T, 4>& gradOutput, const THCDeviceTensor<T, 4>& input, const THCDeviceTensor<T, 4>& output, THCDeviceTensor<T, 4>& gradInput, float power, int width, int stride) { cudaStream_t stream = THCState_getCurrentStream(state); const cudaDeviceProp* deviceProperties = at::cuda::getCurrentDeviceProperties(); for (int i = 0; i < 4; ++i) { THAssert(gradOutput.getSize(i) == output.getSize(i)); THAssert(gradInput.getSize(i) == input.getSize(i)); } int outputFeatures = ((input.getSize(1) - width) / stride) + 1; THAssert(gradInput.getSize(0) == gradOutput.getSize(0)); THAssert(outputFeatures == gradOutput.getSize(1)); THAssert(gradInput.getSize(1) >= width); THAssert(gradInput.getSize(2) == gradOutput.getSize(2)); THAssert(gradInput.getSize(3) == gradOutput.getSize(3)); THAssert(power > 0.0f); THAssert(width >= 1); THAssert(stride >= 1); // Different threads are potentially adding into overlapping input // points, so we must clear out gradInput before continuing. gradInput.zero(stream); // Split non-features among threads and grid x int totalNonFeatureSize = input.getSize(2) * input.getSize(3); int numWarps = min(THCCeilDiv(totalNonFeatureSize, deviceProperties->warpSize), MAX_WARPS_PER_RUN); int blockSize = deviceProperties->warpSize * numWarps; // Split non-features among grid x int nonFeatureSizeBlocks = THCCeilDiv(totalNonFeatureSize, blockSize); // Split features among grid y, up to a maximum number of features per thread int featureBlocks = THCCeilDiv(outputFeatures, OUTPUT_FEATURES_PER_THREAD); // Split batch among grid z. 
dim3 grid(nonFeatureSizeBlocks, featureBlocks, input.getSize(0)); dim3 block(blockSize); #define L2_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ detail:: \ featureLPPoolingUpdateGradInput< \ T, WIDTH, STRIDE, detail::powerGrad2><<<grid, block, 0, stream>>>( \ gradOutput, input, output, gradInput, \ ScalarConvert<float, T>::to(power)); \ return true; #define L2_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ L2_STRIDE_CASE(1, WIDTH); \ L2_STRIDE_CASE(2, WIDTH); \ L2_STRIDE_CASE(3, WIDTH); \ L2_STRIDE_CASE(4, WIDTH); \ } \ break; #define LP_STRIDE_CASE(STRIDE, WIDTH) \ case STRIDE: \ detail:: \ featureLPPoolingUpdateGradInput< \ T, WIDTH, STRIDE, detail::powerGradN><<<grid, block, 0, stream>>>( \ gradOutput, input, output, gradInput, \ ScalarConvert<float, T>::to(power)); \ return true; #define LP_WIDTH_CASE(WIDTH) \ case WIDTH: \ switch (stride) { \ LP_STRIDE_CASE(1, WIDTH); \ LP_STRIDE_CASE(2, WIDTH); \ LP_STRIDE_CASE(3, WIDTH); \ LP_STRIDE_CASE(4, WIDTH); \ } \ break; if (power == 2.0f) { switch (width) { L2_WIDTH_CASE(2); L2_WIDTH_CASE(3); L2_WIDTH_CASE(4); L2_WIDTH_CASE(5); L2_WIDTH_CASE(6); L2_WIDTH_CASE(7); L2_WIDTH_CASE(8); L2_WIDTH_CASE(9); L2_WIDTH_CASE(10); L2_WIDTH_CASE(11); L2_WIDTH_CASE(12); L2_WIDTH_CASE(13); L2_WIDTH_CASE(14); L2_WIDTH_CASE(15); L2_WIDTH_CASE(16); } } else { switch (width) { LP_WIDTH_CASE(2); LP_WIDTH_CASE(3); LP_WIDTH_CASE(4); LP_WIDTH_CASE(5); LP_WIDTH_CASE(6); LP_WIDTH_CASE(7); LP_WIDTH_CASE(8); LP_WIDTH_CASE(9); LP_WIDTH_CASE(10); LP_WIDTH_CASE(11); LP_WIDTH_CASE(12); LP_WIDTH_CASE(13); LP_WIDTH_CASE(14); LP_WIDTH_CASE(15); LP_WIDTH_CASE(16); } } // Otherwise, we have an unhandled width and/or stride. return false; #undef L2_STRIDE_CASE #undef L2_WIDTH_CASE #undef LP_STRIDE_CASE #undef LP_WIDTH_CASE } #include <THCUNN/generic/FeatureLPPooling.cu> #include <THC/THCGenerateFloatTypes.h>
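A minimal standalone sketch (not taken from the file above) of the range arithmetic that featureLPPoolingUpdateGradInput's comments describe: each thread owns a contiguous run of gradInput features it may update without atomicAdd, bounded by the regions that overlap the previous and next threads. The function and variable names below are illustrative only.

#include <cstdio>

struct ExclusiveRange { int start; int end; };  // [start, end)

// Mirrors the kernel's exclusiveStartInputFeature / exclusiveEndInputFeature logic.
ExclusiveRange exclusiveInputRange(int width, int stride,
                                   int startOutputFeature, int endOutputFeature,
                                   int numOutputFeatures) {
  int startInputFeature = startOutputFeature * stride;
  int endInputFeature   = endOutputFeature * stride;
  // Inputs near the start overlap the previous thread unless we begin at feature 0.
  int exclusiveStart = (startInputFeature == 0)
                           ? 0
                           : startInputFeature + (width - 1) * stride;
  // Inputs near the end overlap the next thread unless we own the final outputs.
  int exclusiveEnd = (endOutputFeature == numOutputFeatures)
                         ? endInputFeature + (width - 1) * stride
                         : endInputFeature;
  return {exclusiveStart, exclusiveEnd};
}

int main() {
  // width = 3, stride = 1, a middle thread handling outputs [4, 8) out of 16:
  ExclusiveRange r = exclusiveInputRange(3, 1, 4, 8, 16);
  std::printf("non-atomic gradInput range: [%d, %d)\n", r.start, r.end);  // [6, 8)
  return 0;
}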
910f919f9656474608311323c2b64bdf95e5ff68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define CUDA_KERNEL #include "fluid_system_kern.cuh" #include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort #include <math.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> __constant__ fluidParams simData; __constant__ sysParams sysData; #define EPSILON 0.00001 __global__ void insertParticles ( fluidBufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; register float3 gridMin = sysData.gridMin; register float3 gridDelta = sysData.gridDelta; register int3 gridRes = sysData.gridRes; register int3 gridScan = sysData.gridScanMax; register float poff = sysData.psmoothradius / sysData.psimscale; register float3 gcf = (buf.mpos[i] - gridMin) * gridDelta; register int3 gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); register int gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) { buf.mgcell[i] = gs; // Grid cell insert. buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts. gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; } else { buf.mgcell[i] = GRID_UNDEF; } } // Counting Sort - Full (deep copy) __global__ void countingSortFull ( fluidBufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Copy particle from original, unsorted buffer (msortbuf), // into sorted memory location on device (mpos/mvel) uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) ); uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) ); if ( icell != GRID_UNDEF ) { // Determine the sort_ndx, location of the particle after sort int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset // Find the original particle data, offset into unsorted buffer (msortbuf) char* bpos = buf.msortbuf + i*sizeof(float3); // Transfer data to sort location buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity buf.mpos[ sort_ndx ] = *(float3*) (bpos); buf.mvel[ sort_ndx ] = *(float3*) (bpos+ pnum*BUF_VEL); buf.mveleval[ sort_ndx ] = *(float3*) (bpos+ pnum*BUF_VELEVAL); buf.mclr[ sort_ndx ] = *(uint*) (buf.msortbuf + pnum*BUF_CLR + i*sizeof(uint)); // ((uint) 255)<<24; -- dark matter buf.mId[ sort_ndx ] = *(int*) (buf.msortbuf + pnum*BUF_ID + i*sizeof(int)); // ((uint) 255)<<24; -- dark matter buf.mIsBoundary [sort_ndx]= *(bool*) (buf.msortbuf + pnum*BUF_ISBOUNDARY + i*sizeof(bool)); // ((uint) 255)<<24; -- dark matter buf.mNorm[sort_ndx] = *(float3*) (buf.msortbuf + pnum*BUF_NORM + i*sizeof(float3)); // ((uint) 255)<<24; -- dark matter buf.mgcell[ sort_ndx ] = icell; buf.mgndx[ sort_ndx ] = indx; } } __device__ float contributePressure ( int i, float3 p, int cell, fluidBufList buf) { float sum= 0.0;; register float d2 = sysData.d2; register float r2 = sysData.rd2; if ( buf.mgridcnt[cell] == 0 ) {sum = 0.0;} else { int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { int j = buf.mgrid[ cndx ]; float length = 2.0*sysData.radius; float3 dist = p-buf.mpos[j]; float de = sqrt(dist.x*dist.x + dist.y*dist.y + 
dist.z*dist.z); // calculate arc length float dg = length*asin(de/length); float dsq = dg*dg; if ( dsq <= r2 && dsq >= 0.0) { float c = (r2 - dsq)*d2; float c3 = c * c * c; c3*=simData.pmass; sum += c3; } } } return (sum); } __global__ void computePressure ( fluidBufList buf, int pnum, float restDen ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (sysData.gridRes.z + 1)*sysData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Density register float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < sysData.gridAdjCnt; c++) { sum += contributePressure ( i, pos, gc + sysData.gridAdj[c], buf); } sum *= sysData.poly6kern; __syncthreads(); if ( sum == 0.0 ) { buf.mdensity[ i ]= restDen; buf.mperDensity[ i ] = 1.0/restDen; buf.mpress[ i ] =0.0; return; } buf.mdensity[ i ]= sum; buf.mpress[ i ] = ( sum - restDen) * sysData.pintstiff; buf.mperDensity[i] = 1.0/sum; } __global__ void computeNormal( fluidBufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; float3 pos = buf.mpos[i]; float d = sqrt(pos.x*pos.x+pos.y*pos.y+pos.z*pos.z); buf.mNorm[i] = make_float3(pos.x/d,pos.y/d,pos.z/d); } __device__ float3 contributeForceFluid ( int i, int cell, fluidBufList buf,float dens) { //if i is boundary it should only have boundary neighbors float3 force = make_float3(0,0,0); //force from fluid-fluid if ( buf.mgridcnt[cell] == 0 ) { force = make_float3(0,0,0);} else { for ( int cndx = buf.mgridoff[ cell ]; cndx < buf.mgridoff[ cell ] + buf.mgridcnt[ cell ]; cndx++ ) { int j = buf.mgrid[ cndx ]; float3 dist = buf.mpos[i]-buf.mpos[j]; float length = 2.0*sysData.radius; float de = sqrt(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); // calculate arc length float dg = length*asin(de/length); float dsq2 = dg*dg; float r2 = sysData.rd2; float r = sysData.psmoothradius; if ( dsq2 < r2 && dsq2 > 0) { float dsq = dg*sysData.psimscale; float c = ( r - dsq ); if(dsq2 < r2) { c = ( r - dsq ); float pterm = -1.0*sysData.psimscale * c * sysData.spikykern * ( buf.mpress[ i ]* buf.mperDensity[ i ]* buf.mperDensity[ i ] + buf.mpress[ j ]* buf.mperDensity[ j ]* buf.mperDensity[ j ] ) / dsq; force += (pterm * dist*c); } } } } return (force*simData.pmass); } __global__ void computeForce ( fluidBufList buf, int pnum ,float dens) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= (sysData.gridRes.z + 1)*sysData.gridRes.x + 1; // Sum Pressures register float3 forceF = make_float3(0,0,0); for (int c=0; c < sysData.gridAdjCnt; c++) { forceF += contributeForceFluid ( i, gc + sysData.gridAdj[c], buf, dens ); } buf.mforce[ i ] = forceF - buf.mNorm[i]*dot(buf.mNorm[i],forceF); //all the particles are surface particles } __global__ void advanceParticles ( float time, float dt, fluidBufList buf, int numPnts) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; buf.mPoPos[i] = buf.mpos[i]; if ( buf.mgcell[i] == GRID_UNDEF ) { buf.mPoPos[i] = make_float3(-1000,-1000,-1000); buf.mvel[i] = make_float3(0,0,0); return; } // Get particle vars register float speed; register float3 accel; // Leapfrog integration accel = buf.mforce[i]; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( 
speed > simData.AL2 ) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if ( speed > simData.VL2 ) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; float3 step = vnext * (dt/sysData.psimscale); float movedist = step.x*step.x+step.y*step.y+step.z*step.z; if(movedist>EPSILON) { buf.mPoPos[i] += step; // p(t+1) = p(t) + v(t+1/2) dt } buf.mvel[i] *=0.9; // behave as viscosity } __global__ void updateColor(fluidBufList buf, int numPnts , int c) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; if (c==0) //four colors { if(buf.mNorm[i].y>0&&buf.mNorm[i].x>0) { buf.mclr[ i ] = 0xFF0000FF; } else if(buf.mNorm[i].y<0&&buf.mNorm[i].x>0) { buf.mclr[ i ] = 0xFF00FF00; } else if(buf.mNorm[i].y<0&&buf.mNorm[i].x<0) { buf.mclr[ i ] = 0xFFFF0000; } else { buf.mclr[ i ] = 0xFFFF00FF; } } else if(c==1) // show half sphere { if(buf.mNorm[i].z>0) { buf.mclr[ i ] = 0xFFFF0000; } else { buf.mclr[ i ] = 0x00FF0000; } } else if(c==2) // one color { buf.mclr[ i ] = 0xFFFF0000; } } __global__ void updateParticles ( fluidBufList buf, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; float3 p = buf.mPoPos[i]; float l2 = p.x*p.x+p.y*p.y+p.z*p.z; if(l2!=sysData.radius*sysData.radius&&l2>0) { float l=sqrt(l2); buf.mpos[i] = make_float3(p.x*sysData.radius/l,p.y*sysData.radius/l,p.z*sysData.radius/l);//projection } else { buf.mvel[i] = make_float3(0,0,0); buf.mveleval[i] = make_float3(0,0,0); } } void updateSysParams(sysParams* sysp) { #ifdef CUDA_42 hipMemcpyToSymbol ( "sysData", sysp, sizeof(sysParams) ); #else hipMemcpyToSymbol ( sysData, sysp, sizeof(sysParams) ); #endif } void updateSimParams ( fluidParams* cpufp) { #ifdef CUDA_42 hipMemcpyToSymbol ( "simData", cpufp, sizeof(fluidParams) ); #else hipMemcpyToSymbol ( simData, cpufp, sizeof(fluidParams) ); #endif }
910f919f9656474608311323c2b64bdf95e5ff68.cu
#define CUDA_KERNEL #include "fluid_system_kern.cuh" #include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort #include <math.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> __constant__ fluidParams simData; __constant__ sysParams sysData; #define EPSILON 0.00001 __global__ void insertParticles ( fluidBufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; register float3 gridMin = sysData.gridMin; register float3 gridDelta = sysData.gridDelta; register int3 gridRes = sysData.gridRes; register int3 gridScan = sysData.gridScanMax; register float poff = sysData.psmoothradius / sysData.psimscale; register float3 gcf = (buf.mpos[i] - gridMin) * gridDelta; register int3 gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); register int gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) { buf.mgcell[i] = gs; // Grid cell insert. buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts. gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; } else { buf.mgcell[i] = GRID_UNDEF; } } // Counting Sort - Full (deep copy) __global__ void countingSortFull ( fluidBufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Copy particle from original, unsorted buffer (msortbuf), // into sorted memory location on device (mpos/mvel) uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) ); uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) ); if ( icell != GRID_UNDEF ) { // Determine the sort_ndx, location of the particle after sort int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset // Find the original particle data, offset into unsorted buffer (msortbuf) char* bpos = buf.msortbuf + i*sizeof(float3); // Transfer data to sort location buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity buf.mpos[ sort_ndx ] = *(float3*) (bpos); buf.mvel[ sort_ndx ] = *(float3*) (bpos+ pnum*BUF_VEL); buf.mveleval[ sort_ndx ] = *(float3*) (bpos+ pnum*BUF_VELEVAL); buf.mclr[ sort_ndx ] = *(uint*) (buf.msortbuf + pnum*BUF_CLR + i*sizeof(uint)); // ((uint) 255)<<24; -- dark matter buf.mId[ sort_ndx ] = *(int*) (buf.msortbuf + pnum*BUF_ID + i*sizeof(int)); // ((uint) 255)<<24; -- dark matter buf.mIsBoundary [sort_ndx]= *(bool*) (buf.msortbuf + pnum*BUF_ISBOUNDARY + i*sizeof(bool)); // ((uint) 255)<<24; -- dark matter buf.mNorm[sort_ndx] = *(float3*) (buf.msortbuf + pnum*BUF_NORM + i*sizeof(float3)); // ((uint) 255)<<24; -- dark matter buf.mgcell[ sort_ndx ] = icell; buf.mgndx[ sort_ndx ] = indx; } } __device__ float contributePressure ( int i, float3 p, int cell, fluidBufList buf) { float sum= 0.0;; register float d2 = sysData.d2; register float r2 = sysData.rd2; if ( buf.mgridcnt[cell] == 0 ) {sum = 0.0;} else { int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { int j = buf.mgrid[ cndx ]; float length = 2.0*sysData.radius; float3 dist = p-buf.mpos[j]; float de = sqrt(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); // calculate arc length float dg = length*asin(de/length); float dsq = dg*dg; if ( 
dsq <= r2 && dsq >= 0.0) { float c = (r2 - dsq)*d2; float c3 = c * c * c; c3*=simData.pmass; sum += c3; } } } return (sum); } __global__ void computePressure ( fluidBufList buf, int pnum, float restDen ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (sysData.gridRes.z + 1)*sysData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Density register float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < sysData.gridAdjCnt; c++) { sum += contributePressure ( i, pos, gc + sysData.gridAdj[c], buf); } sum *= sysData.poly6kern; __syncthreads(); if ( sum == 0.0 ) { buf.mdensity[ i ]= restDen; buf.mperDensity[ i ] = 1.0/restDen; buf.mpress[ i ] =0.0; return; } buf.mdensity[ i ]= sum; buf.mpress[ i ] = ( sum - restDen) * sysData.pintstiff; buf.mperDensity[i] = 1.0/sum; } __global__ void computeNormal( fluidBufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; float3 pos = buf.mpos[i]; float d = sqrt(pos.x*pos.x+pos.y*pos.y+pos.z*pos.z); buf.mNorm[i] = make_float3(pos.x/d,pos.y/d,pos.z/d); } __device__ float3 contributeForceFluid ( int i, int cell, fluidBufList buf,float dens) { //if i is boundary it should only have boundary neighbors float3 force = make_float3(0,0,0); //force from fluid-fluid if ( buf.mgridcnt[cell] == 0 ) { force = make_float3(0,0,0);} else { for ( int cndx = buf.mgridoff[ cell ]; cndx < buf.mgridoff[ cell ] + buf.mgridcnt[ cell ]; cndx++ ) { int j = buf.mgrid[ cndx ]; float3 dist = buf.mpos[i]-buf.mpos[j]; float length = 2.0*sysData.radius; float de = sqrt(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); // calculate arc length float dg = length*asin(de/length); float dsq2 = dg*dg; float r2 = sysData.rd2; float r = sysData.psmoothradius; if ( dsq2 < r2 && dsq2 > 0) { float dsq = dg*sysData.psimscale; float c = ( r - dsq ); if(dsq2 < r2) { c = ( r - dsq ); float pterm = -1.0*sysData.psimscale * c * sysData.spikykern * ( buf.mpress[ i ]* buf.mperDensity[ i ]* buf.mperDensity[ i ] + buf.mpress[ j ]* buf.mperDensity[ j ]* buf.mperDensity[ j ] ) / dsq; force += (pterm * dist*c); } } } } return (force*simData.pmass); } __global__ void computeForce ( fluidBufList buf, int pnum ,float dens) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= (sysData.gridRes.z + 1)*sysData.gridRes.x + 1; // Sum Pressures register float3 forceF = make_float3(0,0,0); for (int c=0; c < sysData.gridAdjCnt; c++) { forceF += contributeForceFluid ( i, gc + sysData.gridAdj[c], buf, dens ); } buf.mforce[ i ] = forceF - buf.mNorm[i]*dot(buf.mNorm[i],forceF); //all the particles are surface particles } __global__ void advanceParticles ( float time, float dt, fluidBufList buf, int numPnts) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; buf.mPoPos[i] = buf.mpos[i]; if ( buf.mgcell[i] == GRID_UNDEF ) { buf.mPoPos[i] = make_float3(-1000,-1000,-1000); buf.mvel[i] = make_float3(0,0,0); return; } // Get particle vars register float speed; register float3 accel; // Leapfrog integration accel = buf.mforce[i]; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > simData.AL2 ) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = 
buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if ( speed > simData.VL2 ) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; float3 step = vnext * (dt/sysData.psimscale); float movedist = step.x*step.x+step.y*step.y+step.z*step.z; if(movedist>EPSILON) { buf.mPoPos[i] += step; // p(t+1) = p(t) + v(t+1/2) dt } buf.mvel[i] *=0.9; // behave as viscosity } __global__ void updateColor(fluidBufList buf, int numPnts , int c) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; if (c==0) //four colors { if(buf.mNorm[i].y>0&&buf.mNorm[i].x>0) { buf.mclr[ i ] = 0xFF0000FF; } else if(buf.mNorm[i].y<0&&buf.mNorm[i].x>0) { buf.mclr[ i ] = 0xFF00FF00; } else if(buf.mNorm[i].y<0&&buf.mNorm[i].x<0) { buf.mclr[ i ] = 0xFFFF0000; } else { buf.mclr[ i ] = 0xFFFF00FF; } } else if(c==1) // show half sphere { if(buf.mNorm[i].z>0) { buf.mclr[ i ] = 0xFFFF0000; } else { buf.mclr[ i ] = 0x00FF0000; } } else if(c==2) // one color { buf.mclr[ i ] = 0xFFFF0000; } } __global__ void updateParticles ( fluidBufList buf, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; float3 p = buf.mPoPos[i]; float l2 = p.x*p.x+p.y*p.y+p.z*p.z; if(l2!=sysData.radius*sysData.radius&&l2>0) { float l=sqrt(l2); buf.mpos[i] = make_float3(p.x*sysData.radius/l,p.y*sysData.radius/l,p.z*sysData.radius/l);//projection } else { buf.mvel[i] = make_float3(0,0,0); buf.mveleval[i] = make_float3(0,0,0); } } void updateSysParams(sysParams* sysp) { #ifdef CUDA_42 cudaMemcpyToSymbol ( "sysData", sysp, sizeof(sysParams) ); #else cudaMemcpyToSymbol ( sysData, sysp, sizeof(sysParams) ); #endif } void updateSimParams ( fluidParams* cpufp) { #ifdef CUDA_42 cudaMemcpyToSymbol ( "simData", cpufp, sizeof(fluidParams) ); #else cudaMemcpyToSymbol ( simData, cpufp, sizeof(fluidParams) ); #endif }
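Both fluid kernels above (contributePressure and contributeForceFluid) replace the Euclidean particle distance with a great-circle arc length, since the particles are constrained to a sphere: with length = 2 * radius, the chord de maps to the arc length * asin(de / length). A minimal standalone sketch of that conversion, with an assumed radius in the usage example:

#include <cmath>
#include <cstdio>

// Chord-to-arc conversion on a sphere of the given radius:
// arc = 2r * asin(chord / (2r)), exactly the expression used in the kernels.
float chordToArc(float chord, float radius) {
  float length = 2.0f * radius;
  return length * std::asin(chord / length);
}

int main() {
  const float kPi = 3.14159265f;
  // On a unit sphere, a chord of 2 (antipodal points) maps to an arc of pi.
  std::printf("arc = %f (expected ~%f)\n", chordToArc(2.0f, 1.0f), kPi);
  return 0;
}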
8bfd085d82dcd4f796795cd915e6f55b2e50a159.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, double>* contract_graph_mv_double_sum(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, hipStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(contract_from_aggregates_t<int, double, SemiRingFctrSelector<Sum, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
8bfd085d82dcd4f796795cd915e6f55b2e50a159.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph_contracting_visitor.hxx> namespace nvgraph { //------------------------- Graph Contraction: ---------------------- // MultiValuedCsrGraph<int, double>* contract_graph_mv_double_sum(MultiValuedCsrGraph<int, double>& graph, int* pV, size_t n, cudaStream_t stream, const int& VCombine, const int& VReduce, const int& ECombine, const int& EReduce) { return static_cast<nvgraph::MultiValuedCsrGraph<int, double>*>(contract_from_aggregates_t<int, double, SemiRingFctrSelector<Sum, double>::FctrType >(graph, pV, n, stream, static_cast<SemiRingFunctorTypes>(VCombine), static_cast<SemiRingFunctorTypes>(VReduce), static_cast<SemiRingFunctorTypes>(ECombine), static_cast<SemiRingFunctorTypes>(EReduce))); } }
fc09de4a423156648fceaffec52d06a7c2f0dcc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void multiplyTanh(float* out, float* in1, float* in2, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) out[id] = in1[id] * in2[id]; }
fc09de4a423156648fceaffec52d06a7c2f0dcc8.cu
#include "includes.h" __global__ void multiplyTanh(float* out, float* in1, float* in2, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) out[id] = in1[id] * in2[id]; }
c9b4a2702328ef5a6e996e7d3aae9a1b9517069f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/detail/concatenate.cuh> #include <cudf/detail/copy.hpp> #include <cudf/detail/interop.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/transform.hpp> #include <cudf/detail/unary.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/interop.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> namespace cudf { namespace detail { data_type arrow_to_cudf_type(arrow::DataType const& arrow_type) { switch (arrow_type.id()) { case arrow::Type::NA: return data_type(type_id::EMPTY); case arrow::Type::BOOL: return data_type(type_id::BOOL8); case arrow::Type::INT8: return data_type(type_id::INT8); case arrow::Type::INT16: return data_type(type_id::INT16); case arrow::Type::INT32: return data_type(type_id::INT32); case arrow::Type::INT64: return data_type(type_id::INT64); case arrow::Type::UINT8: return data_type(type_id::UINT8); case arrow::Type::UINT16: return data_type(type_id::UINT16); case arrow::Type::UINT32: return data_type(type_id::UINT32); case arrow::Type::UINT64: return data_type(type_id::UINT64); case arrow::Type::FLOAT: return data_type(type_id::FLOAT32); case arrow::Type::DOUBLE: return data_type(type_id::FLOAT64); case arrow::Type::DATE32: return data_type(type_id::TIMESTAMP_DAYS); case arrow::Type::TIMESTAMP: { arrow::TimestampType const* type = static_cast<arrow::TimestampType const*>(&arrow_type); switch (type->unit()) { case arrow::TimeUnit::type::SECOND: return data_type(type_id::TIMESTAMP_SECONDS); case arrow::TimeUnit::type::MILLI: return data_type(type_id::TIMESTAMP_MILLISECONDS); case arrow::TimeUnit::type::MICRO: return data_type(type_id::TIMESTAMP_MICROSECONDS); case arrow::TimeUnit::type::NANO: return data_type(type_id::TIMESTAMP_NANOSECONDS); default: CUDF_FAIL("Unsupported timestamp unit in arrow"); } } case arrow::Type::DURATION: { arrow::DurationType const* type = static_cast<arrow::DurationType const*>(&arrow_type); switch (type->unit()) { case arrow::TimeUnit::type::SECOND: return data_type(type_id::DURATION_SECONDS); case arrow::TimeUnit::type::MILLI: return data_type(type_id::DURATION_MILLISECONDS); case arrow::TimeUnit::type::MICRO: return data_type(type_id::DURATION_MICROSECONDS); case arrow::TimeUnit::type::NANO: return data_type(type_id::DURATION_NANOSECONDS); default: CUDF_FAIL("Unsupported duration unit in arrow"); } } case arrow::Type::STRING: return data_type(type_id::STRING); case arrow::Type::DICTIONARY: return data_type(type_id::DICTIONARY32); case arrow::Type::LIST: return data_type(type_id::LIST); default: CUDF_FAIL("Unsupported type_id conversion to cudf"); } } namespace { /** * @brief Functor to return column for a 
corresponding arrow array. column * is formed from buffer underneath the arrow array along with any offset and * change in length that array has. */ struct dispatch_to_cudf_column { /** * @brief Returns mask from an array withut any offsets. */ std::unique_ptr<rmm::device_buffer> get_mask_buffer(arrow::Array const& array, rmm::mr::device_memory_resource* mr, hipStream_t stream) { if (array.null_bitmap_data() == nullptr) { return std::make_unique<rmm::device_buffer>(0, stream, mr); } auto mask = std::make_unique<rmm::device_buffer>( bitmask_allocation_size_bytes(static_cast<size_type>(array.null_bitmap()->size() * CHAR_BIT)), stream, mr); CUDA_TRY(hipMemcpyAsync(mask->data(), array.null_bitmap_data(), array.null_bitmap()->size(), hipMemcpyHostToDevice, stream)); return mask; } template <typename T> std::unique_ptr<column> operator()(arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto data_buffer = array.data()->buffers[1]; size_type const num_rows = array.length(); auto const has_nulls = skip_mask ? false : array.null_bitmap_data() != nullptr; auto col = make_fixed_width_column(type, num_rows, mask_state::UNALLOCATED, stream, mr); auto mutable_column_view = col->mutable_view(); CUDA_TRY(hipMemcpyAsync(mutable_column_view.data<void*>(), data_buffer->data() + array.offset() * sizeof(T), sizeof(T) * num_rows, hipMemcpyHostToDevice, stream)); if (has_nulls) { auto tmp_mask = get_mask_buffer(array, mr, stream); // If array is sliced, we have to copy whole mask and then take copy. auto out_mask = (num_rows == static_cast<size_type>(data_buffer->size() / sizeof(T))) ? *tmp_mask : copy_bitmask(static_cast<bitmask_type*>(tmp_mask->data()), array.offset(), array.offset() + num_rows, stream, mr); col->set_null_mask(std::move(out_mask)); } return col; } }; /** * @brief Returns cudf column formed from given arrow array * This has been introduced to take care of compiler error "error: explicit specialization of * function must precede its first use" */ std::unique_ptr<column> get_column(arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream); template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<bool>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto data_buffer = array.data()->buffers[1]; auto data = rmm::device_buffer(data_buffer->size(), stream, mr); CUDA_TRY(hipMemcpyAsync( data.data(), data_buffer->data(), data_buffer->size(), hipMemcpyHostToDevice, stream)); auto out_col = mask_to_bools(static_cast<bitmask_type*>(data.data()), array.offset(), array.offset() + array.length(), stream, mr); auto const has_nulls = skip_mask ? 
false : array.null_bitmap_data() != nullptr; if (has_nulls) { auto out_mask = copy_bitmask(static_cast<bitmask_type*>(get_mask_buffer(array, mr, stream)->data()), array.offset(), array.offset() + array.length(), stream, mr); out_col->set_null_mask(std::move(out_mask)); } return out_col; } template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<cudf::string_view>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { if (array.length() == 0) { return cudf::strings::detail::make_empty_strings_column(mr, stream); } auto str_array = static_cast<arrow::StringArray const*>(&array); auto offset_array = std::make_unique<arrow::Int32Array>( str_array->value_offsets()->size() / sizeof(int32_t), str_array->value_offsets(), nullptr); auto char_array = std::make_unique<arrow::Int8Array>( str_array->value_data()->size(), str_array->value_data(), nullptr); auto offsets_column = dispatch_to_cudf_column{}.operator()<int32_t>( *offset_array, data_type(type_id::INT32), true, mr, stream); auto chars_column = dispatch_to_cudf_column{}.operator()<int8_t>( *char_array, data_type(type_id::INT8), true, mr, stream); auto const num_rows = offsets_column->size() - 1; auto out_col = make_strings_column(num_rows, std::move(offsets_column), std::move(chars_column), UNKNOWN_NULL_COUNT, std::move(*get_mask_buffer(array, mr, stream)), stream, mr); return num_rows == array.length() ? std::move(out_col) : std::make_unique<column>(cudf::detail::slice( out_col->view(), static_cast<size_type>(array.offset()), static_cast<size_type>(array.offset() + array.length()))); } template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<cudf::dictionary32>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto dict_array = static_cast<arrow::DictionaryArray const*>(&array); auto ind_type = arrow_to_cudf_type(*(dict_array->indices()->type())); auto indices_column = get_column(*(dict_array->indices()), ind_type, false, mr, stream); // If index type is not of type int32_t, then cast it to int32_t if (indices_column->type().id() != type_id::INT32) indices_column = cudf::detail::cast(indices_column->view(), data_type(type_id::INT32), mr, stream); auto dict_type = arrow_to_cudf_type(*(dict_array->dictionary()->type())); auto keys_column = get_column(*(dict_array->dictionary()), dict_type, true, mr, stream); // Child columns shouldn't have masks and we need the mask in main column auto column_contents = indices_column->release(); indices_column = std::make_unique<column>(data_type(type_id::INT32), static_cast<size_type>(array.length()), std::move(*(column_contents.data))); return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(column_contents.null_mask)), UNKNOWN_NULL_COUNT); } template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<cudf::list_view>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto list_array = static_cast<arrow::ListArray const*>(&array); auto offset_array = std::make_unique<arrow::Int32Array>( list_array->value_offsets()->size() / sizeof(int32_t), list_array->value_offsets(), nullptr); auto offsets_column = dispatch_to_cudf_column{}.operator()<int32_t>( *offset_array, data_type(type_id::INT32), true, mr, stream); auto child_type = arrow_to_cudf_type(*(list_array->values()->type())); auto child_column = get_column(*(list_array->values()), 
child_type, false, mr, stream); auto const num_rows = offsets_column->size() - 1; auto out_col = make_lists_column(num_rows, std::move(offsets_column), std::move(child_column), UNKNOWN_NULL_COUNT, std::move(*get_mask_buffer(array, mr, stream))); return num_rows == array.length() ? std::move(out_col) : std::make_unique<column>(cudf::detail::slice( out_col->view(), static_cast<size_type>(array.offset()), static_cast<size_type>(array.offset() + array.length()))); } std::unique_ptr<column> get_column(arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, hipStream_t stream) { return type_dispatcher(type, dispatch_to_cudf_column{}, array, type, skip_mask, mr, stream); } } // namespace std::unique_ptr<table> from_arrow(arrow::Table const& input_table, rmm::mr::device_memory_resource* mr, hipStream_t stream) { if (input_table.num_columns() == 0) { return std::make_unique<table>(); } std::vector<std::unique_ptr<column>> columns; auto chunked_arrays = input_table.columns(); std::transform(chunked_arrays.begin(), chunked_arrays.end(), std::back_inserter(columns), [&mr, &stream](auto const& chunked_array) { std::vector<std::unique_ptr<column>> concat_columns; auto cudf_type = arrow_to_cudf_type(*(chunked_array->type())); auto array_chunks = chunked_array->chunks(); if (cudf_type.id() == type_id::EMPTY) { return std::make_unique<column>( cudf_type, chunked_array->length(), std::move(rmm::device_buffer(0))); } transform(array_chunks.begin(), array_chunks.end(), std::back_inserter(concat_columns), [&cudf_type, &mr, &stream](auto const& array_chunk) { return get_column(*array_chunk, cudf_type, false, mr, stream); }); if (concat_columns.size() == 0) { return std::make_unique<column>(cudf_type, 0, rmm::device_buffer(0)); } else if (concat_columns.size() == 1) { return std::move(concat_columns[0]); } std::vector<cudf::column_view> column_views; std::transform(concat_columns.begin(), concat_columns.end(), std::back_inserter(column_views), [](auto const& col) { return col->view(); }); return cudf::detail::concatenate(column_views, mr, stream); }); return std::make_unique<table>(std::move(columns)); } } // namespace detail std::unique_ptr<table> from_arrow(arrow::Table const& input_table, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_arrow(input_table, mr); } } // namespace cudf
c9b4a2702328ef5a6e996e7d3aae9a1b9517069f.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/detail/concatenate.cuh> #include <cudf/detail/copy.hpp> #include <cudf/detail/interop.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/transform.hpp> #include <cudf/detail/unary.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/interop.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> namespace cudf { namespace detail { data_type arrow_to_cudf_type(arrow::DataType const& arrow_type) { switch (arrow_type.id()) { case arrow::Type::NA: return data_type(type_id::EMPTY); case arrow::Type::BOOL: return data_type(type_id::BOOL8); case arrow::Type::INT8: return data_type(type_id::INT8); case arrow::Type::INT16: return data_type(type_id::INT16); case arrow::Type::INT32: return data_type(type_id::INT32); case arrow::Type::INT64: return data_type(type_id::INT64); case arrow::Type::UINT8: return data_type(type_id::UINT8); case arrow::Type::UINT16: return data_type(type_id::UINT16); case arrow::Type::UINT32: return data_type(type_id::UINT32); case arrow::Type::UINT64: return data_type(type_id::UINT64); case arrow::Type::FLOAT: return data_type(type_id::FLOAT32); case arrow::Type::DOUBLE: return data_type(type_id::FLOAT64); case arrow::Type::DATE32: return data_type(type_id::TIMESTAMP_DAYS); case arrow::Type::TIMESTAMP: { arrow::TimestampType const* type = static_cast<arrow::TimestampType const*>(&arrow_type); switch (type->unit()) { case arrow::TimeUnit::type::SECOND: return data_type(type_id::TIMESTAMP_SECONDS); case arrow::TimeUnit::type::MILLI: return data_type(type_id::TIMESTAMP_MILLISECONDS); case arrow::TimeUnit::type::MICRO: return data_type(type_id::TIMESTAMP_MICROSECONDS); case arrow::TimeUnit::type::NANO: return data_type(type_id::TIMESTAMP_NANOSECONDS); default: CUDF_FAIL("Unsupported timestamp unit in arrow"); } } case arrow::Type::DURATION: { arrow::DurationType const* type = static_cast<arrow::DurationType const*>(&arrow_type); switch (type->unit()) { case arrow::TimeUnit::type::SECOND: return data_type(type_id::DURATION_SECONDS); case arrow::TimeUnit::type::MILLI: return data_type(type_id::DURATION_MILLISECONDS); case arrow::TimeUnit::type::MICRO: return data_type(type_id::DURATION_MICROSECONDS); case arrow::TimeUnit::type::NANO: return data_type(type_id::DURATION_NANOSECONDS); default: CUDF_FAIL("Unsupported duration unit in arrow"); } } case arrow::Type::STRING: return data_type(type_id::STRING); case arrow::Type::DICTIONARY: return data_type(type_id::DICTIONARY32); case arrow::Type::LIST: return data_type(type_id::LIST); default: CUDF_FAIL("Unsupported type_id conversion to cudf"); } } namespace { /** * @brief Functor to return column for a corresponding arrow array. 
column * is formed from buffer underneath the arrow array along with any offset and * change in length that array has. */ struct dispatch_to_cudf_column { /** * @brief Returns mask from an array withut any offsets. */ std::unique_ptr<rmm::device_buffer> get_mask_buffer(arrow::Array const& array, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { if (array.null_bitmap_data() == nullptr) { return std::make_unique<rmm::device_buffer>(0, stream, mr); } auto mask = std::make_unique<rmm::device_buffer>( bitmask_allocation_size_bytes(static_cast<size_type>(array.null_bitmap()->size() * CHAR_BIT)), stream, mr); CUDA_TRY(cudaMemcpyAsync(mask->data(), array.null_bitmap_data(), array.null_bitmap()->size(), cudaMemcpyHostToDevice, stream)); return mask; } template <typename T> std::unique_ptr<column> operator()(arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto data_buffer = array.data()->buffers[1]; size_type const num_rows = array.length(); auto const has_nulls = skip_mask ? false : array.null_bitmap_data() != nullptr; auto col = make_fixed_width_column(type, num_rows, mask_state::UNALLOCATED, stream, mr); auto mutable_column_view = col->mutable_view(); CUDA_TRY(cudaMemcpyAsync(mutable_column_view.data<void*>(), data_buffer->data() + array.offset() * sizeof(T), sizeof(T) * num_rows, cudaMemcpyHostToDevice, stream)); if (has_nulls) { auto tmp_mask = get_mask_buffer(array, mr, stream); // If array is sliced, we have to copy whole mask and then take copy. auto out_mask = (num_rows == static_cast<size_type>(data_buffer->size() / sizeof(T))) ? *tmp_mask : copy_bitmask(static_cast<bitmask_type*>(tmp_mask->data()), array.offset(), array.offset() + num_rows, stream, mr); col->set_null_mask(std::move(out_mask)); } return col; } }; /** * @brief Returns cudf column formed from given arrow array * This has been introduced to take care of compiler error "error: explicit specialization of * function must precede its first use" */ std::unique_ptr<column> get_column(arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream); template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<bool>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto data_buffer = array.data()->buffers[1]; auto data = rmm::device_buffer(data_buffer->size(), stream, mr); CUDA_TRY(cudaMemcpyAsync( data.data(), data_buffer->data(), data_buffer->size(), cudaMemcpyHostToDevice, stream)); auto out_col = mask_to_bools(static_cast<bitmask_type*>(data.data()), array.offset(), array.offset() + array.length(), stream, mr); auto const has_nulls = skip_mask ? 
false : array.null_bitmap_data() != nullptr; if (has_nulls) { auto out_mask = copy_bitmask(static_cast<bitmask_type*>(get_mask_buffer(array, mr, stream)->data()), array.offset(), array.offset() + array.length(), stream, mr); out_col->set_null_mask(std::move(out_mask)); } return out_col; } template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<cudf::string_view>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { if (array.length() == 0) { return cudf::strings::detail::make_empty_strings_column(mr, stream); } auto str_array = static_cast<arrow::StringArray const*>(&array); auto offset_array = std::make_unique<arrow::Int32Array>( str_array->value_offsets()->size() / sizeof(int32_t), str_array->value_offsets(), nullptr); auto char_array = std::make_unique<arrow::Int8Array>( str_array->value_data()->size(), str_array->value_data(), nullptr); auto offsets_column = dispatch_to_cudf_column{}.operator()<int32_t>( *offset_array, data_type(type_id::INT32), true, mr, stream); auto chars_column = dispatch_to_cudf_column{}.operator()<int8_t>( *char_array, data_type(type_id::INT8), true, mr, stream); auto const num_rows = offsets_column->size() - 1; auto out_col = make_strings_column(num_rows, std::move(offsets_column), std::move(chars_column), UNKNOWN_NULL_COUNT, std::move(*get_mask_buffer(array, mr, stream)), stream, mr); return num_rows == array.length() ? std::move(out_col) : std::make_unique<column>(cudf::detail::slice( out_col->view(), static_cast<size_type>(array.offset()), static_cast<size_type>(array.offset() + array.length()))); } template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<cudf::dictionary32>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto dict_array = static_cast<arrow::DictionaryArray const*>(&array); auto ind_type = arrow_to_cudf_type(*(dict_array->indices()->type())); auto indices_column = get_column(*(dict_array->indices()), ind_type, false, mr, stream); // If index type is not of type int32_t, then cast it to int32_t if (indices_column->type().id() != type_id::INT32) indices_column = cudf::detail::cast(indices_column->view(), data_type(type_id::INT32), mr, stream); auto dict_type = arrow_to_cudf_type(*(dict_array->dictionary()->type())); auto keys_column = get_column(*(dict_array->dictionary()), dict_type, true, mr, stream); // Child columns shouldn't have masks and we need the mask in main column auto column_contents = indices_column->release(); indices_column = std::make_unique<column>(data_type(type_id::INT32), static_cast<size_type>(array.length()), std::move(*(column_contents.data))); return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(column_contents.null_mask)), UNKNOWN_NULL_COUNT); } template <> std::unique_ptr<column> dispatch_to_cudf_column::operator()<cudf::list_view>( arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto list_array = static_cast<arrow::ListArray const*>(&array); auto offset_array = std::make_unique<arrow::Int32Array>( list_array->value_offsets()->size() / sizeof(int32_t), list_array->value_offsets(), nullptr); auto offsets_column = dispatch_to_cudf_column{}.operator()<int32_t>( *offset_array, data_type(type_id::INT32), true, mr, stream); auto child_type = arrow_to_cudf_type(*(list_array->values()->type())); auto child_column = get_column(*(list_array->values()), 
child_type, false, mr, stream); auto const num_rows = offsets_column->size() - 1; auto out_col = make_lists_column(num_rows, std::move(offsets_column), std::move(child_column), UNKNOWN_NULL_COUNT, std::move(*get_mask_buffer(array, mr, stream))); return num_rows == array.length() ? std::move(out_col) : std::make_unique<column>(cudf::detail::slice( out_col->view(), static_cast<size_type>(array.offset()), static_cast<size_type>(array.offset() + array.length()))); } std::unique_ptr<column> get_column(arrow::Array const& array, data_type type, bool skip_mask, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { return type_dispatcher(type, dispatch_to_cudf_column{}, array, type, skip_mask, mr, stream); } } // namespace std::unique_ptr<table> from_arrow(arrow::Table const& input_table, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { if (input_table.num_columns() == 0) { return std::make_unique<table>(); } std::vector<std::unique_ptr<column>> columns; auto chunked_arrays = input_table.columns(); std::transform(chunked_arrays.begin(), chunked_arrays.end(), std::back_inserter(columns), [&mr, &stream](auto const& chunked_array) { std::vector<std::unique_ptr<column>> concat_columns; auto cudf_type = arrow_to_cudf_type(*(chunked_array->type())); auto array_chunks = chunked_array->chunks(); if (cudf_type.id() == type_id::EMPTY) { return std::make_unique<column>( cudf_type, chunked_array->length(), std::move(rmm::device_buffer(0))); } transform(array_chunks.begin(), array_chunks.end(), std::back_inserter(concat_columns), [&cudf_type, &mr, &stream](auto const& array_chunk) { return get_column(*array_chunk, cudf_type, false, mr, stream); }); if (concat_columns.size() == 0) { return std::make_unique<column>(cudf_type, 0, rmm::device_buffer(0)); } else if (concat_columns.size() == 1) { return std::move(concat_columns[0]); } std::vector<cudf::column_view> column_views; std::transform(concat_columns.begin(), concat_columns.end(), std::back_inserter(column_views), [](auto const& col) { return col->view(); }); return cudf::detail::concatenate(column_views, mr, stream); }); return std::make_unique<table>(std::move(columns)); } } // namespace detail std::unique_ptr<table> from_arrow(arrow::Table const& input_table, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_arrow(input_table, mr); } } // namespace cudf
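The sliced-array handling above (copy the whole Arrow null bitmap, then re-base it with copy_bitmask at array.offset()) can be pictured with a plain host-side analogue. The sketch below is illustrative only and uses no cudf or Arrow APIs; it simply re-bases an LSB-first validity bitmap the way the device code does in parallel.

#include <cstdint>
#include <vector>

// Returns a bitmask whose bit i equals bit (offset + i) of `mask`.
std::vector<uint8_t> rebase_bitmask(const std::vector<uint8_t>& mask,
                                    int64_t offset, int64_t length) {
  std::vector<uint8_t> out((length + 7) / 8, 0);
  for (int64_t i = 0; i < length; ++i) {
    int64_t src = offset + i;
    bool valid = (mask[src / 8] >> (src % 8)) & 1;  // Arrow bitmaps are LSB-first
    if (valid) out[i / 8] |= static_cast<uint8_t>(1u << (i % 8));
  }
  return out;
}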
c2308de7b38f9c3d4f877fd6f754f0a929aa5958.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Cody Thompson Photon Mapping */ #include <iostream> //#include "RayTracer.h" #include "RayTracer.cuh" using namespace std; //extern __shared__ float sh[]; RayTracer::RayTracer(std::vector<Light*>* l, std::vector<SceneObject*>* o) { lights = &((*l)[0]); objects = &((*o)[0]); objSize = (*o).size(); /*printf("OBJ SIZE: %d\n", (*o).size()); printf("ADSFASDF %d\n", objSize); printf("TYPE %d\n", objects[0]->type); for (int i = 0; i < (*o).size(); i++) { if (!(*o)[i]) printf("FUCKKaadsfasdf %d\n", i); }*/ cudaStack = NULL; } RayTracer::RayTracer(std::vector<Light*>* l, std::vector<SceneObject*>* o, int gM, int cM, KDTreeNode* gr, KDTreeNode* cr) { lights = &((*l)[0]); objects = &((*o)[0]); objSize = (*o).size(); numGPhotons = gM; numCPhotons = cM; root = gr; rootC1 = cr; /*printf("OBJ SIZE: %d\n", (*o).size()); printf("ADSFASDF %d\n", objSize); printf("TYPE %d\n", objects[0]->type); for (int i = 0; i < (*o).size(); i++) { if (!objects[i]) printf("FUCKKaadsfasdf %d\n", i); }*/ cudaStack = NULL; } RayTracer::RayTracer(SceneObject** o, int osize, int gM, int cM, KDTreeNode* gr, KDTreeNode* cr) { objects = o; objSize = osize; numGPhotons = gM; numCPhotons = cM; root = gr; rootC1 = cr; cudaStack = NULL; } RayTracer::RayTracer() { cudaStack = NULL; } RayTracer::~RayTracer() { } //Collision* RayTracer::trace(glm::vec3 start, glm::vec3 ray, bool unit) { Collision* RayTracer::trace(glm::vec3 start, glm::vec3 ray, bool unit, int *shI, float *shF) { Collision* c = new Collision(); shF[6] = start.x; shF[7] = start.y; shF[8] = start.z; c->detectRayCollision2(ray, objects, objSize, -1, unit, shI, shF); return c; } Collision* RayTracer::trace(glm::vec3 ray, int *shI, float *shF) { //Collision* RayTracer::trace(glm::vec3 start, glm::vec3 ray, int *shI, float *shF) { //I01: Omit Index Collision* c = new Collision(); shI[2] = objSize; c->detectRayCollision(ray, objects, shI, shF); return c; } __device__ __noinline__ volatile float * volatile RayTracer::getMatInv(glm::vec3 iPt, SceneObject* obj, float *shF) { /* 7: c 8: s 9: t */ glm::mat3 matInv; glm::vec3 normal = obj->getNormal(obj, iPt, shF); glm::vec3 eRay = glm::vec3(0.0, 0.0, 1.0); normal = glm::normalize(normal); if (fabs(ELLIPSOID_SCALE - 1.0) > TOLERANCE && (fabs(fabs(normal[0]) - eRay[0]) > TOLERANCE || fabs(fabs(normal[1]) - eRay[1]) > TOLERANCE || fabs(fabs(normal[2]) - eRay[2]) > TOLERANCE)) { glm::vec3 crossP = glm::cross(eRay, normal); crossP = glm::normalize(crossP); shF[6] = glm::dot(eRay, normal); shF[7] = sin(acos(glm::dot(eRay, normal))); shF[8] = 1.0 - shF[6]; glm::mat3 mat = glm::mat3(shF[8]*crossP.x*crossP.x + shF[6], shF[8]*crossP.x*crossP.y - crossP.z*shF[7], shF[8]*crossP.x*crossP.z + crossP.y*shF[7], shF[8]*crossP.x*crossP.y + crossP.z*shF[7], shF[8]*crossP.y*crossP.y + shF[6], shF[8]*crossP.y*crossP.z - crossP.x*shF[7], shF[8]*crossP.x*crossP.z - crossP.y*shF[7], shF[8]*crossP.y*crossP.z + crossP.x*shF[7], shF[8]*crossP.z*crossP.z + shF[6]); matInv = glm::inverse(mat); } else { matInv = glm::mat3(1.0f); } //volatile float* volatile mInv;// = return glm::value_ptr(matInv); } __device__ __noinline__ glm::vec3 RayTracer::accumulatePhotons(Photon **locateHeap, glm::vec3 iPt, SceneObject* obj, float *shF, int *shI) { glm::vec3 clr(0.0f, 0.0f, 0.0f); for (volatile int i = 0; i < shI[2]; i++) { //BRDF glm::vec3 normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); float dotProd = glm::dot(-locateHeap[i]->incidence, normal); //if 
(shI[0] < causts) { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} else { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} } return clr; } __attribute__ ((noinline)) void __attribute__ ((noinline)) RayTracer::getNormal(SceneObject *obj, glm::vec3 iPt, float *shF) { glm::vec3 normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); shF[3] = normal.x; shF[4] = normal.y; shF[5] = normal.z; } glm::vec3 RayTracer::calcRadiance(glm::vec3 start, glm::vec3 iPt, SceneObject* obj, bool unit, float scale, float n1, float dropoff, int threadNum, int depth, int *shI, float *shF) { /*float e = 2.71828; float alpha = 0.918; float beta = 1.953;*/ //int causts; //volatile float x = 0.0f, y = 0.0f, z = 0.0f, t = 0.0f, c = 0.0f, s = 0.0f; //printf("THREADNUM: %d\n", threadNum); //float sampleDistSqrd, newRadSqrd;//, scaleN; //Photon** locateHeap;// = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*));; //int heapSize; //glm::vec3 eRay = glm::vec3(0.0, 0.0, 1.0); //glm::mat3 mat, matInv; //volatile float reflectScale = obj->reflection; //volatile float * volatile matInv; //if (1) matInv = (volatile float * volatile)glm::value_ptr(glm::mat3(1.0f));//getMatInv(iPt, obj, shF); //if (1) matInv = getMatInv(iPt, obj, shF); //glm::vec3 normal = obj->getNormal(obj, iPt, shF); getNormal(obj, iPt, shF); //normal = glm::normalize(normal); shF[1] = obj->reflection; //volatile float refract = obj->refraction; shF[2] = obj->refraction; //float dropoffCalc = glm::length(iPt - start); shF[0] = glm::length(iPt - start); //dropoffCalc = pow(dropoff, dropoffCalc); shF[0] = pow(dropoff, shF[0]); //glm::vec3 clr, absorbClr, reflectClr, refractClr; glm::vec3 clr(0.0f, 0.0f, 0.0f); //float n2 = 0.0f;//, reflectScale = 0.0f, tempDO = 0.0f;//, dots1 = 0.0, dots2 = 0.0, temp, temp2, mainDist, mainT, dist, reflectance = 1.0f / obj->roughness, D, F, G, m, sroot, R = 0.0f, R0 = 0.0f, innersqr = 1.0f; //glm::vec3 colorD, colorS, colorA, color, normal, reflectRay, newStart, newIPt, crossP; //glm::vec4 tempNormal, tempStart, tempIPt; //glm::vec3 pigment(obj->pigment.x, obj->pigment.y, obj->pigment.z); //glm::vec3 l, v, h, lcol, dir; //color = glm::vec3(0.0f, 0.0f, 0.0f); //glm::vec3 normal = obj->getNormal(obj, iPt, shF); //normal = glm::normalize(normal); //v = start - iPt; //v = glm::normalize(v); //dir = -v; //Collision* col; //float time = glm::length(iPt - start); //float dropoffCalc = pow(dropoff, time); //absorbClr.x = reflectClr.x = refractClr.x = 0.0; //absorbClr.y = reflectClr.y = refractClr.y = 0.0; //absorbClr.z = reflectClr.z = refractClr.z = 0.0; //colorD = pigment * obj->diffuse; //colorS = pigment * obj->specular; //colorA = pigment * obj->ambient; //if (fabs(1.0f - refract) > TOLERANCE || depth <= 0) { //reflectScale = obj->reflection; //printf("%d\n", threadNum); //Photon** locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); Photon** locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); //int heapSize = 0; //heapSize = 0; //if (numCPhotons > 0) rootC1->locatePhotons(1, iPt, locateHeap, &heapSize, 0.05, &newRadSqrd, matInv, numCPhotons, cudaStack + (threadNum * stackPartition)); //causts = heapSize; //volatile int causts = 0;//shI[2]; //sh[threadSpot] = iPt.x; //sh[threadSpot+1] = iPt.y; //sh[threadSpot+2] = iPt.z; shF[7] = INITIAL_SAMPLE_DIST_SQRD; shF[8] = 
INITIAL_SAMPLE_DIST_SQRD; shI[2] = 0;//heapSize; //asm volatile("membar.cta;"); volatile float * volatile matInv; if (1) matInv = (volatile float * volatile)glm::value_ptr(glm::mat3(1.0f));//getMatInv(iPt, obj, shF); //matInv = getMatInv(iPt, obj, shF); //if (numGPhotons > 0) root->locatePhotons(iPt, locateHeap, matInv, numGPhotons, shF + threadSpotF, shI + threadSpotI);// cudaStack + (threadNum * stackPartition)); shI[0] = numGPhotons; //asm volatile("membar.cta;"); if (numGPhotons) root->locatePhotons(iPt, locateHeap, matInv, numGPhotons, shF, shI);// cudaStack + (threadNum * stackPartition)); //printf("HEAPSIZE FAM: %d\n", heapSize); //sh[threadSpot] = 0.1f; //if (numGPhotons > 0) root->locatePhotons(iPt, threadSpot, locateHeap, sampleDistSqrd, &newRadSqrd, numGPhotons, sh); //printf("sheeeet\n"); //heapSize = shI[2]; //printf("HS: %d\n", heapSize); //if (heapSize) { // printf("PTN INT: %f %f %f\n", locateHeap[0]->intensity.x, locateHeap[0]->intensity.y, locateHeap[0]->intensity.z); //} //for (int i = 0; i < heapSize; i++) { //clr = accumulatePhotons(locateHeap, iPt, obj, shF, shI); /*for (volatile int i = 0; i < shI[2]; i++) { //BRDF glm::vec3 normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); float dotProd = glm::dot(-locateHeap[i]->incidence, normal); //if (shI[0] < causts) { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} else { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} }*/ //color /= shF[8] * M_PI; //clr /= shF[8] * M_PI; //color *= (1.0 - obj->reflection) * scale; //clr *= (1.0 - reflectScale) * scale; //clr.x = clr.x * (1.0f - reflectScale) * scale; //absorbClr.x = color.x; //absorbClr.y = color.y; //absorbClr.z = color.z; //clr.x += color.x * dropoffCalc; //clr.y += color.y * dropoffCalc; //clr.z += color.z * dropoffCalc; //clr += color; free(locateHeap); //} /*else { if (depth > 0) { //Do Refraction reflectRay = findRefract(dir, normal, obj, n1, &n2, &reflectScale, &tempDO); if (fabs(reflectScale - 1.0f) >= TOLERANCE) { //Total internal reflection carry-over check shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >TOLERANCE) { //refractClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * (1.0f - reflectScale), n2, tempDO, threadNum, depth - 1, shI, shF); color = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * (1.0f - reflectScale), n2, tempDO, threadNum, depth - 1, shI, shF); clr.x += color.x * dropoffCalc; clr.y += color.y * dropoffCalc; clr.z += color.z * dropoffCalc; } delete(col); } } }*/ /*if ((obj->reflection > TOLERANCE || fabs(obj->refraction - 1.0) < TOLERANCE) && depth > 0) { //float randFloat; reflectRay = findReflect(dir, normal, obj); shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >= TOLERANCE) { //reflectClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * reflectScale, n1, dropoff, threadNum, depth - 1, shI, shF); color = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * reflectScale, n1, dropoff, threadNum, depth - 1, shI, shF); clr.x += color.x * dropoffCalc; clr.y += color.y * dropoffCalc; clr.z += color.z * dropoffCalc; } delete(col); }*/ //time 
= glm::length(iPt - start); //float dropoffCalc = pow(dropoff, time); //clr.x += (absorbClr.x + reflectClr.x + refractClr.x) * dropoffCalc; //clr.y += (absorbClr.y + reflectClr.y + refractClr.y) * dropoffCalc; //clr.z += (absorbClr.z + reflectClr.z + refractClr.z) * dropoffCalc; clr *= shF[0]; return clr; } /*glm::vec3 RayTracer::calcRadiance(glm::vec3 start, glm::vec3 iPt, SceneObject* obj, bool unit, float scale, float n1, float dropoff, int threadNum, int depth, int *shI, float *shF) { //float e = 2.71828; //float alpha = 0.918; //float beta = 1.953; int causts; float x = 0.0f, y = 0.0f, z = 0.0f, t = 0.0f, c = 0.0f, s = 0.0f; //printf("THREADNUM: %d\n", threadNum); //float sampleDistSqrd, newRadSqrd;//, scaleN; Photon** locateHeap;// = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*));; int heapSize; glm::vec3 eRay = glm::vec3(0.0, 0.0, 1.0); glm::mat3 mat, matInv; glm::vec3 clr, absorbClr, reflectClr, refractClr; float n2 = 0.0f, time = 0.0f, reflectScale = 0.0f, tempDO = 0.0f;//, dots1 = 0.0, dots2 = 0.0, temp, temp2, mainDist, mainT, dist, reflectance = 1.0f / obj->roughness, D, F, G, m, sroot, R = 0.0f, R0 = 0.0f, innersqr = 1.0f; glm::vec3 colorD, colorS, colorA, color, normal, reflectRay, newStart, newIPt, crossP; glm::vec4 tempNormal, tempStart, tempIPt; glm::vec3 pigment(obj->pigment.x, obj->pigment.y, obj->pigment.z); glm::vec3 l, v, h, lcol, dir; color = glm::vec3(0.0f, 0.0f, 0.0f); clr.x = clr.y = clr.z = 0.0; //locateHeap.clear(); //sampleDistSqrd = newRadSqrd = INITIAL_SAMPLE_DIST_SQRD; normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); v = start - iPt; v = glm::normalize(v); dir = -v; Collision* col; //bool shadow; absorbClr.x = reflectClr.x = refractClr.x = 0.0; absorbClr.y = reflectClr.y = refractClr.y = 0.0; absorbClr.z = reflectClr.z = refractClr.z = 0.0; colorD = pigment * obj->diffuse; colorS = pigment * obj->specular; colorA = pigment * obj->ambient; //if (threadNum == 0) { // printf("STACK PART: %d\n", stackPartition); //} //if (threadNum == 0) { // printf("TREE START\n"); // root->printTree(root); // printf("TREE END\n"); //} //if (threadNum == 0) { if (fabs(1.0f - obj->refraction) > TOLERANCE || depth <= 0) { /*for (int lightnum = 0; lightnum < lights.size(); lightnum++) { dots1 = dots2 = 0.0f; shadow = false; lcol = lights[lightnum]->getColor(); lcol = Eigen::Vector3f(1.0f, 1.0f, 1.0f); l = (lights[lightnum]->getPosition() - iPt); mainDist = l.norm(); l.normalize(); h = (l + v); h.normalize(); l.normalize(); col = new Collision(); col->detectRayCollision(iPt, l, objects, -1, unit); if (col->time >= TOLERANCE && (l*col->time).norm() < mainDist) shadow = true; delete(col); if (shadow == false) { dots1 = l.dot(normal); if (dots1 < TOLERANCE) { dots1 = 0.0f; } temp = dots2 = h.dot(normal); if (dots2 < TOLERANCE) { dots2 = 0.0f; } else { for (int i = 0; i < int(reflectance); i++) { dots2 *= temp; } } } color.x() += ((colorA.x() / float(lights.size())) + (colorD.x() * dots1) + (colorS.x() * dots2)) * lcol.x(); color.y() += ((colorA.y() / float(lights.size())) + (colorD.y() * dots1) + (colorS.y() * dots2)) * lcol.y(); color.z() += ((colorA.z() / float(lights.size())) + (colorD.z() * dots1) + (colorS.z() * dots2)) * lcol.z(); }*//* reflectScale = obj->reflection; //glm::mat3 matInv; if (fabs(ELLIPSOID_SCALE - 1.0) > TOLERANCE && (fabs(fabs(normal[0]) - eRay[0]) > TOLERANCE || fabs(fabs(normal[1]) - eRay[1]) > TOLERANCE || fabs(fabs(normal[2]) - eRay[2]) > TOLERANCE)) { crossP = glm::cross(eRay, normal); crossP = glm::normalize(crossP); //float 
x = 0.0f, y = 0.0f, z = 0.0f, t = 0.0f, c = 0.0f, s = 0.0f; x = crossP.x; y = crossP.y; z = crossP.z; c = glm::dot(eRay, normal); s = sin(acos(glm::dot(eRay, normal))); t = 1.0 - c; mat = glm::mat3(t*x*x + c, t*x*y - z*s, t*x*z + y*s, t*x*y + z*s, t*y*y + c, t*y*z - x*s, t*x*z - y*s, t*y*z + x*s, t*z*z + c); matInv = glm::inverse(mat); } else { matInv = glm::mat3(1.0f); } //inv stuff //volatile glm::mat3 volatile mInv; //mInv.value[0].x = matInv[0][0]; //mInv.value[0].y = matInv[0][1]; //mInv.value[0].z = matInv[0][2]; //mInv.value[1].x = matInv[1][0]; //mInv.value[1].y = matInv[1][1]; //mInv.value[1].z = matInv[1][2]; //mInv.value[2].x = matInv[2][0]; //mInv.value[2].y = matInv[2][1]; //mInv.value[2].z = matInv[2][2]; volatile float* volatile mInv = glm::value_ptr(matInv); //printf("%d\n", threadNum); //Photon** locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); //int heapSize = 0; heapSize = 0; //if (numCPhotons > 0) rootC1->locatePhotons(1, iPt, locateHeap, &heapSize, 0.05, &newRadSqrd, matInv, numCPhotons, cudaStack + (threadNum * stackPartition)); causts = heapSize; //printf("I'm guesssing here\n"); //sh[threadSpot] = iPt.x; //sh[threadSpot+1] = iPt.y; //sh[threadSpot+2] = iPt.z; shF[7] = INITIAL_SAMPLE_DIST_SQRD; shF[8] = INITIAL_SAMPLE_DIST_SQRD; shI[2] = 0;//heapSize; //if (numGPhotons > 0) root->locatePhotons(iPt, locateHeap, matInv, numGPhotons, shF + threadSpotF, shI + threadSpotI);// cudaStack + (threadNum * stackPartition)); if (numGPhotons > 0) root->locatePhotons(iPt, locateHeap, mInv, numGPhotons, shF, shI);// cudaStack + (threadNum * stackPartition)); //sh[threadSpot] = 0.1f; //if (numGPhotons > 0) root->locatePhotons(iPt, threadSpot, locateHeap, sampleDistSqrd, &newRadSqrd, numGPhotons, sh); heapSize = shI[2]; //printf("HS: %d\n", heapSize); //if (heapSize) { // printf("PTN INT: %f %f %f\n", locateHeap[0]->intensity.x, locateHeap[0]->intensity.y, locateHeap[0]->intensity.z); //} //for (int i = 0; i < heapSize; i++) { for (int i = 0; i < heapSize; i++) { //BRDF float dotProd = glm::dot(-locateHeap[i]->incidence, normal); //volatile float dotProd = -(locateHeap[i]->incidence.x) * normal.x;//glm::dot(-locateHeap[i]->incidence, normal); //dotProd += -(locateHeap[i]->incidence.y) * normal.y; //dotProd += -(locateHeap[i]->incidence.z) * normal.z; if (i < causts) { //float d = glm::length(locateHeap[i]->pt - iPt); //float w = alpha * (1 - ((1 - pow(e, -1 * beta * ((d * d) / (2 * newRadSqrd)))) / (1 - pow(e, -1 * beta)))); color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f);// * w;//* (1.0 - ((locateHeap[i]->pt - intersectPt).norm() / sqrt(newRadSqrd))); } else { //dotProd = (dotProd > 0.0f ? dotProd : 0.0f); //color.x += locateHeap[i]->intensity.x * dotProd; //color.y += locateHeap[i]->intensity.x * dotProd; //color.z += locateHeap[i]->intensity.x * dotProd; //if (dotProd < TOLERANCE) dotProd = 0.0f; //color += locateHeap[i]->intensity * dotProd; color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? 
dotProd : 0.0f); } } color /= shF[8] * M_PI; color *= (1.0 - obj->reflection) * scale; absorbClr.x = color.x; absorbClr.y = color.y; absorbClr.z = color.z; free(locateHeap); } else { if (depth > 0) { //Do Refraction reflectRay = findRefract(dir, normal, obj, n1, &n2, &reflectScale, &tempDO); if (fabs(reflectScale - 1.0f) >= TOLERANCE) { //Total internal reflection carry-over check shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >TOLERANCE) { refractClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * (1.0f - reflectScale), n2, tempDO, threadNum, depth - 1, shI, shF); } delete(col); } } } if ((obj->reflection > TOLERANCE || fabs(obj->refraction - 1.0) < TOLERANCE) && depth > 0) { //float randFloat; reflectRay = findReflect(dir, normal, obj); shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >= TOLERANCE) { reflectClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * reflectScale, n1, dropoff, threadNum, depth - 1, shI, shF); } delete(col); } time = glm::length(iPt - start); float dropoffCalc = pow(dropoff, time); clr.x += (absorbClr.x + reflectClr.x + refractClr.x) * dropoffCalc; clr.y += (absorbClr.y + reflectClr.y + refractClr.y) * dropoffCalc; clr.z += (absorbClr.z + reflectClr.z + refractClr.z) * dropoffCalc; return clr; } //}*/ glm::vec3 RayTracer::findReflect(glm::vec3 ray, glm::vec3 normal, SceneObject* obj) { glm::vec3 reflectRay; reflectRay = ray + (2.0f*normal*(glm::dot(normal, -ray))); reflectRay = glm::normalize(reflectRay); return reflectRay; } glm::vec3 RayTracer::findRefract(glm::vec3 ray, glm::vec3 normalI, SceneObject* obj, float n1, float* n2, float* R, float* dropoff) { glm::vec3 refractRay, normal = normalI; float dots1, R0, sroot, innersqr; //Determine object-ray status dots1 = glm::dot(-ray, normal); if (dots1 < 0.0f) { //Exitting *n2 = 1.0f; //Assume no refract object collision *dropoff = 1.0f; normal *= -1.0f; dots1 = glm::dot(-ray, normal); } else { //Entering *n2 = obj->indexRefraction; *dropoff = obj->dropoff; } sroot = 1.0f - ((n1 / *n2) * (n1 / *n2) * (1.0f - (dots1 * dots1))); if (sroot < 0.0f) { //Total internal reflection check *R = 1.0f; } else { //Schlick Overhead R0 = (n1 - *n2) / (n1 + *n2); R0 *= R0; innersqr = 1.0f - dots1; innersqr = pow(innersqr, 5); *R = R0 + ((1.0f - R0) * innersqr); refractRay = ((n1 / *n2) * (ray + (normal * dots1))) - (normal * sqrt(sroot)); refractRay = glm::normalize(refractRay); } return refractRay; }
c2308de7b38f9c3d4f877fd6f754f0a929aa5958.cu
/* Cody Thompson Photon Mapping */ #include <iostream> //#include "RayTracer.h" #include "RayTracer.cuh" using namespace std; //extern __shared__ float sh[]; RayTracer::RayTracer(std::vector<Light*>* l, std::vector<SceneObject*>* o) { lights = &((*l)[0]); objects = &((*o)[0]); objSize = (*o).size(); /*printf("OBJ SIZE: %d\n", (*o).size()); printf("ADSFASDF %d\n", objSize); printf("TYPE %d\n", objects[0]->type); for (int i = 0; i < (*o).size(); i++) { if (!(*o)[i]) printf("FUCKKaadsfasdf %d\n", i); }*/ cudaStack = NULL; } RayTracer::RayTracer(std::vector<Light*>* l, std::vector<SceneObject*>* o, int gM, int cM, KDTreeNode* gr, KDTreeNode* cr) { lights = &((*l)[0]); objects = &((*o)[0]); objSize = (*o).size(); numGPhotons = gM; numCPhotons = cM; root = gr; rootC1 = cr; /*printf("OBJ SIZE: %d\n", (*o).size()); printf("ADSFASDF %d\n", objSize); printf("TYPE %d\n", objects[0]->type); for (int i = 0; i < (*o).size(); i++) { if (!objects[i]) printf("FUCKKaadsfasdf %d\n", i); }*/ cudaStack = NULL; } RayTracer::RayTracer(SceneObject** o, int osize, int gM, int cM, KDTreeNode* gr, KDTreeNode* cr) { objects = o; objSize = osize; numGPhotons = gM; numCPhotons = cM; root = gr; rootC1 = cr; cudaStack = NULL; } RayTracer::RayTracer() { cudaStack = NULL; } RayTracer::~RayTracer() { } //Collision* RayTracer::trace(glm::vec3 start, glm::vec3 ray, bool unit) { Collision* RayTracer::trace(glm::vec3 start, glm::vec3 ray, bool unit, int *shI, float *shF) { Collision* c = new Collision(); shF[6] = start.x; shF[7] = start.y; shF[8] = start.z; c->detectRayCollision2(ray, objects, objSize, -1, unit, shI, shF); return c; } Collision* RayTracer::trace(glm::vec3 ray, int *shI, float *shF) { //Collision* RayTracer::trace(glm::vec3 start, glm::vec3 ray, int *shI, float *shF) { //I01: Omit Index Collision* c = new Collision(); shI[2] = objSize; c->detectRayCollision(ray, objects, shI, shF); return c; } __device__ __noinline__ volatile float * volatile RayTracer::getMatInv(glm::vec3 iPt, SceneObject* obj, float *shF) { /* 7: c 8: s 9: t */ glm::mat3 matInv; glm::vec3 normal = obj->getNormal(obj, iPt, shF); glm::vec3 eRay = glm::vec3(0.0, 0.0, 1.0); normal = glm::normalize(normal); if (fabs(ELLIPSOID_SCALE - 1.0) > TOLERANCE && (fabs(fabs(normal[0]) - eRay[0]) > TOLERANCE || fabs(fabs(normal[1]) - eRay[1]) > TOLERANCE || fabs(fabs(normal[2]) - eRay[2]) > TOLERANCE)) { glm::vec3 crossP = glm::cross(eRay, normal); crossP = glm::normalize(crossP); shF[6] = glm::dot(eRay, normal); shF[7] = sin(acos(glm::dot(eRay, normal))); shF[8] = 1.0 - shF[6]; glm::mat3 mat = glm::mat3(shF[8]*crossP.x*crossP.x + shF[6], shF[8]*crossP.x*crossP.y - crossP.z*shF[7], shF[8]*crossP.x*crossP.z + crossP.y*shF[7], shF[8]*crossP.x*crossP.y + crossP.z*shF[7], shF[8]*crossP.y*crossP.y + shF[6], shF[8]*crossP.y*crossP.z - crossP.x*shF[7], shF[8]*crossP.x*crossP.z - crossP.y*shF[7], shF[8]*crossP.y*crossP.z + crossP.x*shF[7], shF[8]*crossP.z*crossP.z + shF[6]); matInv = glm::inverse(mat); } else { matInv = glm::mat3(1.0f); } //volatile float* volatile mInv;// = return glm::value_ptr(matInv); } __device__ __noinline__ glm::vec3 RayTracer::accumulatePhotons(Photon **locateHeap, glm::vec3 iPt, SceneObject* obj, float *shF, int *shI) { glm::vec3 clr(0.0f, 0.0f, 0.0f); for (volatile int i = 0; i < shI[2]; i++) { //BRDF glm::vec3 normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); float dotProd = glm::dot(-locateHeap[i]->incidence, normal); //if (shI[0] < causts) { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? 
dotProd : 0.0f); clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} else { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} } return clr; } __attribute__ ((noinline)) void __attribute__ ((noinline)) RayTracer::getNormal(SceneObject *obj, glm::vec3 iPt, float *shF) { glm::vec3 normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); shF[3] = normal.x; shF[4] = normal.y; shF[5] = normal.z; } glm::vec3 RayTracer::calcRadiance(glm::vec3 start, glm::vec3 iPt, SceneObject* obj, bool unit, float scale, float n1, float dropoff, int threadNum, int depth, int *shI, float *shF) { /*float e = 2.71828; float alpha = 0.918; float beta = 1.953;*/ //int causts; //volatile float x = 0.0f, y = 0.0f, z = 0.0f, t = 0.0f, c = 0.0f, s = 0.0f; //printf("THREADNUM: %d\n", threadNum); //float sampleDistSqrd, newRadSqrd;//, scaleN; //Photon** locateHeap;// = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*));; //int heapSize; //glm::vec3 eRay = glm::vec3(0.0, 0.0, 1.0); //glm::mat3 mat, matInv; //volatile float reflectScale = obj->reflection; //volatile float * volatile matInv; //if (1) matInv = (volatile float * volatile)glm::value_ptr(glm::mat3(1.0f));//getMatInv(iPt, obj, shF); //if (1) matInv = getMatInv(iPt, obj, shF); //glm::vec3 normal = obj->getNormal(obj, iPt, shF); getNormal(obj, iPt, shF); //normal = glm::normalize(normal); shF[1] = obj->reflection; //volatile float refract = obj->refraction; shF[2] = obj->refraction; //float dropoffCalc = glm::length(iPt - start); shF[0] = glm::length(iPt - start); //dropoffCalc = pow(dropoff, dropoffCalc); shF[0] = pow(dropoff, shF[0]); //glm::vec3 clr, absorbClr, reflectClr, refractClr; glm::vec3 clr(0.0f, 0.0f, 0.0f); //float n2 = 0.0f;//, reflectScale = 0.0f, tempDO = 0.0f;//, dots1 = 0.0, dots2 = 0.0, temp, temp2, mainDist, mainT, dist, reflectance = 1.0f / obj->roughness, D, F, G, m, sroot, R = 0.0f, R0 = 0.0f, innersqr = 1.0f; //glm::vec3 colorD, colorS, colorA, color, normal, reflectRay, newStart, newIPt, crossP; //glm::vec4 tempNormal, tempStart, tempIPt; //glm::vec3 pigment(obj->pigment.x, obj->pigment.y, obj->pigment.z); //glm::vec3 l, v, h, lcol, dir; //color = glm::vec3(0.0f, 0.0f, 0.0f); //glm::vec3 normal = obj->getNormal(obj, iPt, shF); //normal = glm::normalize(normal); //v = start - iPt; //v = glm::normalize(v); //dir = -v; //Collision* col; //float time = glm::length(iPt - start); //float dropoffCalc = pow(dropoff, time); //absorbClr.x = reflectClr.x = refractClr.x = 0.0; //absorbClr.y = reflectClr.y = refractClr.y = 0.0; //absorbClr.z = reflectClr.z = refractClr.z = 0.0; //colorD = pigment * obj->diffuse; //colorS = pigment * obj->specular; //colorA = pigment * obj->ambient; //if (fabs(1.0f - refract) > TOLERANCE || depth <= 0) { //reflectScale = obj->reflection; //printf("%d\n", threadNum); //Photon** locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); Photon** locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); //int heapSize = 0; //heapSize = 0; //if (numCPhotons > 0) rootC1->locatePhotons(1, iPt, locateHeap, &heapSize, 0.05, &newRadSqrd, matInv, numCPhotons, cudaStack + (threadNum * stackPartition)); //causts = heapSize; //volatile int causts = 0;//shI[2]; //sh[threadSpot] = iPt.x; //sh[threadSpot+1] = iPt.y; //sh[threadSpot+2] = iPt.z; shF[7] = INITIAL_SAMPLE_DIST_SQRD; shF[8] = INITIAL_SAMPLE_DIST_SQRD; shI[2] = 0;//heapSize; //asm volatile("membar.cta;"); volatile float * 
volatile matInv; if (1) matInv = (volatile float * volatile)glm::value_ptr(glm::mat3(1.0f));//getMatInv(iPt, obj, shF); //matInv = getMatInv(iPt, obj, shF); //if (numGPhotons > 0) root->locatePhotons(iPt, locateHeap, matInv, numGPhotons, shF + threadSpotF, shI + threadSpotI);// cudaStack + (threadNum * stackPartition)); shI[0] = numGPhotons; //asm volatile("membar.cta;"); if (numGPhotons) root->locatePhotons(iPt, locateHeap, matInv, numGPhotons, shF, shI);// cudaStack + (threadNum * stackPartition)); //printf("HEAPSIZE FAM: %d\n", heapSize); //sh[threadSpot] = 0.1f; //if (numGPhotons > 0) root->locatePhotons(iPt, threadSpot, locateHeap, sampleDistSqrd, &newRadSqrd, numGPhotons, sh); //printf("sheeeet\n"); //heapSize = shI[2]; //printf("HS: %d\n", heapSize); //if (heapSize) { // printf("PTN INT: %f %f %f\n", locateHeap[0]->intensity.x, locateHeap[0]->intensity.y, locateHeap[0]->intensity.z); //} //for (int i = 0; i < heapSize; i++) { //clr = accumulatePhotons(locateHeap, iPt, obj, shF, shI); /*for (volatile int i = 0; i < shI[2]; i++) { //BRDF glm::vec3 normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); float dotProd = glm::dot(-locateHeap[i]->incidence, normal); //if (shI[0] < causts) { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} else { //color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //clr += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f); //} }*/ //color /= shF[8] * M_PI; //clr /= shF[8] * M_PI; //color *= (1.0 - obj->reflection) * scale; //clr *= (1.0 - reflectScale) * scale; //clr.x = clr.x * (1.0f - reflectScale) * scale; //absorbClr.x = color.x; //absorbClr.y = color.y; //absorbClr.z = color.z; //clr.x += color.x * dropoffCalc; //clr.y += color.y * dropoffCalc; //clr.z += color.z * dropoffCalc; //clr += color; free(locateHeap); //} /*else { if (depth > 0) { //Do Refraction reflectRay = findRefract(dir, normal, obj, n1, &n2, &reflectScale, &tempDO); if (fabs(reflectScale - 1.0f) >= TOLERANCE) { //Total internal reflection carry-over check shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >TOLERANCE) { //refractClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * (1.0f - reflectScale), n2, tempDO, threadNum, depth - 1, shI, shF); color = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * (1.0f - reflectScale), n2, tempDO, threadNum, depth - 1, shI, shF); clr.x += color.x * dropoffCalc; clr.y += color.y * dropoffCalc; clr.z += color.z * dropoffCalc; } delete(col); } } }*/ /*if ((obj->reflection > TOLERANCE || fabs(obj->refraction - 1.0) < TOLERANCE) && depth > 0) { //float randFloat; reflectRay = findReflect(dir, normal, obj); shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >= TOLERANCE) { //reflectClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * reflectScale, n1, dropoff, threadNum, depth - 1, shI, shF); color = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * reflectScale, n1, dropoff, threadNum, depth - 1, shI, shF); clr.x += color.x * dropoffCalc; clr.y += color.y * dropoffCalc; clr.z += color.z * dropoffCalc; } delete(col); }*/ //time = glm::length(iPt - start); //float dropoffCalc = pow(dropoff, time); //clr.x += (absorbClr.x + 
reflectClr.x + refractClr.x) * dropoffCalc; //clr.y += (absorbClr.y + reflectClr.y + refractClr.y) * dropoffCalc; //clr.z += (absorbClr.z + reflectClr.z + refractClr.z) * dropoffCalc; clr *= shF[0]; return clr; } /*glm::vec3 RayTracer::calcRadiance(glm::vec3 start, glm::vec3 iPt, SceneObject* obj, bool unit, float scale, float n1, float dropoff, int threadNum, int depth, int *shI, float *shF) { //float e = 2.71828; //float alpha = 0.918; //float beta = 1.953; int causts; float x = 0.0f, y = 0.0f, z = 0.0f, t = 0.0f, c = 0.0f, s = 0.0f; //printf("THREADNUM: %d\n", threadNum); //float sampleDistSqrd, newRadSqrd;//, scaleN; Photon** locateHeap;// = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*));; int heapSize; glm::vec3 eRay = glm::vec3(0.0, 0.0, 1.0); glm::mat3 mat, matInv; glm::vec3 clr, absorbClr, reflectClr, refractClr; float n2 = 0.0f, time = 0.0f, reflectScale = 0.0f, tempDO = 0.0f;//, dots1 = 0.0, dots2 = 0.0, temp, temp2, mainDist, mainT, dist, reflectance = 1.0f / obj->roughness, D, F, G, m, sroot, R = 0.0f, R0 = 0.0f, innersqr = 1.0f; glm::vec3 colorD, colorS, colorA, color, normal, reflectRay, newStart, newIPt, crossP; glm::vec4 tempNormal, tempStart, tempIPt; glm::vec3 pigment(obj->pigment.x, obj->pigment.y, obj->pigment.z); glm::vec3 l, v, h, lcol, dir; color = glm::vec3(0.0f, 0.0f, 0.0f); clr.x = clr.y = clr.z = 0.0; //locateHeap.clear(); //sampleDistSqrd = newRadSqrd = INITIAL_SAMPLE_DIST_SQRD; normal = obj->getNormal(obj, iPt, shF); normal = glm::normalize(normal); v = start - iPt; v = glm::normalize(v); dir = -v; Collision* col; //bool shadow; absorbClr.x = reflectClr.x = refractClr.x = 0.0; absorbClr.y = reflectClr.y = refractClr.y = 0.0; absorbClr.z = reflectClr.z = refractClr.z = 0.0; colorD = pigment * obj->diffuse; colorS = pigment * obj->specular; colorA = pigment * obj->ambient; //if (threadNum == 0) { // printf("STACK PART: %d\n", stackPartition); //} //if (threadNum == 0) { // printf("TREE START\n"); // root->printTree(root); // printf("TREE END\n"); //} //if (threadNum == 0) { if (fabs(1.0f - obj->refraction) > TOLERANCE || depth <= 0) { /*for (int lightnum = 0; lightnum < lights.size(); lightnum++) { dots1 = dots2 = 0.0f; shadow = false; lcol = lights[lightnum]->getColor(); lcol = Eigen::Vector3f(1.0f, 1.0f, 1.0f); l = (lights[lightnum]->getPosition() - iPt); mainDist = l.norm(); l.normalize(); h = (l + v); h.normalize(); l.normalize(); col = new Collision(); col->detectRayCollision(iPt, l, objects, -1, unit); if (col->time >= TOLERANCE && (l*col->time).norm() < mainDist) shadow = true; delete(col); if (shadow == false) { dots1 = l.dot(normal); if (dots1 < TOLERANCE) { dots1 = 0.0f; } temp = dots2 = h.dot(normal); if (dots2 < TOLERANCE) { dots2 = 0.0f; } else { for (int i = 0; i < int(reflectance); i++) { dots2 *= temp; } } } color.x() += ((colorA.x() / float(lights.size())) + (colorD.x() * dots1) + (colorS.x() * dots2)) * lcol.x(); color.y() += ((colorA.y() / float(lights.size())) + (colorD.y() * dots1) + (colorS.y() * dots2)) * lcol.y(); color.z() += ((colorA.z() / float(lights.size())) + (colorD.z() * dots1) + (colorS.z() * dots2)) * lcol.z(); }*//* reflectScale = obj->reflection; //glm::mat3 matInv; if (fabs(ELLIPSOID_SCALE - 1.0) > TOLERANCE && (fabs(fabs(normal[0]) - eRay[0]) > TOLERANCE || fabs(fabs(normal[1]) - eRay[1]) > TOLERANCE || fabs(fabs(normal[2]) - eRay[2]) > TOLERANCE)) { crossP = glm::cross(eRay, normal); crossP = glm::normalize(crossP); //float x = 0.0f, y = 0.0f, z = 0.0f, t = 0.0f, c = 0.0f, s = 0.0f; x = crossP.x; y = crossP.y; z = 
crossP.z; c = glm::dot(eRay, normal); s = sin(acos(glm::dot(eRay, normal))); t = 1.0 - c; mat = glm::mat3(t*x*x + c, t*x*y - z*s, t*x*z + y*s, t*x*y + z*s, t*y*y + c, t*y*z - x*s, t*x*z - y*s, t*y*z + x*s, t*z*z + c); matInv = glm::inverse(mat); } else { matInv = glm::mat3(1.0f); } //inv stuff //volatile glm::mat3 volatile mInv; //mInv.value[0].x = matInv[0][0]; //mInv.value[0].y = matInv[0][1]; //mInv.value[0].z = matInv[0][2]; //mInv.value[1].x = matInv[1][0]; //mInv.value[1].y = matInv[1][1]; //mInv.value[1].z = matInv[1][2]; //mInv.value[2].x = matInv[2][0]; //mInv.value[2].y = matInv[2][1]; //mInv.value[2].z = matInv[2][2]; volatile float* volatile mInv = glm::value_ptr(matInv); //printf("%d\n", threadNum); //Photon** locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); locateHeap = (Photon**)malloc(CUTOFF_HEAP_SIZE * sizeof(Photon*)); //int heapSize = 0; heapSize = 0; //if (numCPhotons > 0) rootC1->locatePhotons(1, iPt, locateHeap, &heapSize, 0.05, &newRadSqrd, matInv, numCPhotons, cudaStack + (threadNum * stackPartition)); causts = heapSize; //printf("I'm guesssing here\n"); //sh[threadSpot] = iPt.x; //sh[threadSpot+1] = iPt.y; //sh[threadSpot+2] = iPt.z; shF[7] = INITIAL_SAMPLE_DIST_SQRD; shF[8] = INITIAL_SAMPLE_DIST_SQRD; shI[2] = 0;//heapSize; //if (numGPhotons > 0) root->locatePhotons(iPt, locateHeap, matInv, numGPhotons, shF + threadSpotF, shI + threadSpotI);// cudaStack + (threadNum * stackPartition)); if (numGPhotons > 0) root->locatePhotons(iPt, locateHeap, mInv, numGPhotons, shF, shI);// cudaStack + (threadNum * stackPartition)); //sh[threadSpot] = 0.1f; //if (numGPhotons > 0) root->locatePhotons(iPt, threadSpot, locateHeap, sampleDistSqrd, &newRadSqrd, numGPhotons, sh); heapSize = shI[2]; //printf("HS: %d\n", heapSize); //if (heapSize) { // printf("PTN INT: %f %f %f\n", locateHeap[0]->intensity.x, locateHeap[0]->intensity.y, locateHeap[0]->intensity.z); //} //for (int i = 0; i < heapSize; i++) { for (int i = 0; i < heapSize; i++) { //BRDF float dotProd = glm::dot(-locateHeap[i]->incidence, normal); //volatile float dotProd = -(locateHeap[i]->incidence.x) * normal.x;//glm::dot(-locateHeap[i]->incidence, normal); //dotProd += -(locateHeap[i]->incidence.y) * normal.y; //dotProd += -(locateHeap[i]->incidence.z) * normal.z; if (i < causts) { //float d = glm::length(locateHeap[i]->pt - iPt); //float w = alpha * (1 - ((1 - pow(e, -1 * beta * ((d * d) / (2 * newRadSqrd)))) / (1 - pow(e, -1 * beta)))); color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? dotProd : 0.0f);// * w;//* (1.0 - ((locateHeap[i]->pt - intersectPt).norm() / sqrt(newRadSqrd))); } else { //dotProd = (dotProd > 0.0f ? dotProd : 0.0f); //color.x += locateHeap[i]->intensity.x * dotProd; //color.y += locateHeap[i]->intensity.x * dotProd; //color.z += locateHeap[i]->intensity.x * dotProd; //if (dotProd < TOLERANCE) dotProd = 0.0f; //color += locateHeap[i]->intensity * dotProd; color += (locateHeap[i]->intensity) * (dotProd > 0.0f ? 
dotProd : 0.0f); } } color /= shF[8] * M_PI; color *= (1.0 - obj->reflection) * scale; absorbClr.x = color.x; absorbClr.y = color.y; absorbClr.z = color.z; free(locateHeap); } else { if (depth > 0) { //Do Refraction reflectRay = findRefract(dir, normal, obj, n1, &n2, &reflectScale, &tempDO); if (fabs(reflectScale - 1.0f) >= TOLERANCE) { //Total internal reflection carry-over check shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >TOLERANCE) { refractClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * (1.0f - reflectScale), n2, tempDO, threadNum, depth - 1, shI, shF); } delete(col); } } } if ((obj->reflection > TOLERANCE || fabs(obj->refraction - 1.0) < TOLERANCE) && depth > 0) { //float randFloat; reflectRay = findReflect(dir, normal, obj); shF[6] = iPt.x; shF[7] = iPt.y; shF[8] = iPt.z; col = trace(reflectRay, shI, shF); //col = trace(iPt, reflectRay, shI, shF); if (col->time >= TOLERANCE) { reflectClr = calcRadiance(iPt, iPt + reflectRay * col->time, col->object, unit, scale * reflectScale, n1, dropoff, threadNum, depth - 1, shI, shF); } delete(col); } time = glm::length(iPt - start); float dropoffCalc = pow(dropoff, time); clr.x += (absorbClr.x + reflectClr.x + refractClr.x) * dropoffCalc; clr.y += (absorbClr.y + reflectClr.y + refractClr.y) * dropoffCalc; clr.z += (absorbClr.z + reflectClr.z + refractClr.z) * dropoffCalc; return clr; } //}*/ glm::vec3 RayTracer::findReflect(glm::vec3 ray, glm::vec3 normal, SceneObject* obj) { glm::vec3 reflectRay; reflectRay = ray + (2.0f*normal*(glm::dot(normal, -ray))); reflectRay = glm::normalize(reflectRay); return reflectRay; } glm::vec3 RayTracer::findRefract(glm::vec3 ray, glm::vec3 normalI, SceneObject* obj, float n1, float* n2, float* R, float* dropoff) { glm::vec3 refractRay, normal = normalI; float dots1, R0, sroot, innersqr; //Determine object-ray status dots1 = glm::dot(-ray, normal); if (dots1 < 0.0f) { //Exitting *n2 = 1.0f; //Assume no refract object collision *dropoff = 1.0f; normal *= -1.0f; dots1 = glm::dot(-ray, normal); } else { //Entering *n2 = obj->indexRefraction; *dropoff = obj->dropoff; } sroot = 1.0f - ((n1 / *n2) * (n1 / *n2) * (1.0f - (dots1 * dots1))); if (sroot < 0.0f) { //Total internal reflection check *R = 1.0f; } else { //Schlick Overhead R0 = (n1 - *n2) / (n1 + *n2); R0 *= R0; innersqr = 1.0f - dots1; innersqr = pow(innersqr, 5); *R = R0 + ((1.0f - R0) * innersqr); refractRay = ((n1 / *n2) * (ray + (normal * dots1))) - (normal * sqrt(sroot)); refractRay = glm::normalize(refractRay); } return refractRay; }
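Note on the RayTracer pair above (this paragraph and the snippet are editorial, not part of either file): the .hip and .cu versions appear to differ only in their preamble, because the source uses qualifiers that HIP and CUDA share (__device__, __noinline__) and no CUDA runtime calls that need renaming. The prepended lines, copied from the .hip file above, are:

// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ...the rest of RayTracer.cu is carried over unchanged...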
23237a98e061c17337ffcc3364df6e93038db0ff.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/div_rtn.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/im2col.cuh> #include <ATen/native/im2col_shape_check.h> namespace at { namespace native { namespace { static void im2col_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TORCH_CHECK( kernel_size.size() == 2, "It is expected kernel_size equals to 2, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 2, "It is expected dilation equals to 2, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 2, "It is expected padding equals to 2, but got size ", padding.size()); TORCH_CHECK( stride.size() == 2, "It is expected stride equals to 2, but got size ", stride.size()); int64_t kernel_height = kernel_size[0]; int64_t kernel_width = kernel_size[1]; int64_t dilation_height = dilation[0]; int64_t dilation_width = dilation[1]; int64_t pad_height = padding[0]; int64_t pad_width = padding[1]; int64_t stride_height = stride[0]; int64_t stride_width = stride[1]; TensorArg input_arg{input_, "input", 1}; TensorArg output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); im2col_shape_check( input_, Tensor(), kernel_height, kernel_width, dilation_height, dilation_width, pad_height, pad_width, stride_height, stride_width); Tensor input = input_.contiguous(); bool batched_input = true; if (input.dim() == 3) { batched_input = false; input.resize_({1, input.size(0), input.size(1), input.size(2)}); } int64_t batch_size = input.size(0); int64_t n_input_plane = input.size(1); int64_t input_height = input.size(2); int64_t input_width = input.size(3); int64_t output_height = (input_height + 2 * pad_height - (dilation_height * (kernel_height - 1) + 1)) / stride_height + 1; int64_t output_width = (input_width + 2 * pad_width - (dilation_width * (kernel_width - 1) + 1)) / stride_width + 1; int64_t n_output_plane = n_input_plane * kernel_width * kernel_height; int64_t output_length = output_height * output_width; output.resize_({batch_size, n_output_plane, output_length}); output.zero_(); // Launch kernel AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "im2col_out_cuda", [&] { Tensor input_n; Tensor output_n; for (int64_t elt = 0; elt < batch_size; elt++) { input_n = input.select(0, elt); output_n = output.select(0, elt); im2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data_ptr<scalar_t>(), n_input_plane, input_height, input_width, output_height, output_width, kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); } if (!batched_input) { output.resize_({n_output_plane, output_length}); } }); } static void im2col_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TORCH_CHECK( input_size.size() == 2, "It is expected input_size equals to 2, but got size ", input_size.size()); // col2im_out_cuda checks size of kernel_size, dilation, padding and stride at::native::col2im_out_cuda( grad_output, input_size, kernel_size, dilation, padding, stride, grad_input); } } // namespace 
Tensor& im2col_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& output) { im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } Tensor im2col_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } Tensor& im2col_backward_out_cuda(const Tensor& grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& grad_input) { im2col_backward_out_cuda_template( grad_input, grad_output, input_size, kernel_size, dilation, padding, stride); return grad_input; } Tensor im2col_backward_cuda( const Tensor& grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); im2col_backward_out_cuda_template( grad_input, grad_output, input_size, kernel_size, dilation, padding, stride); return grad_input; } } // namespace native } // namespace at
23237a98e061c17337ffcc3364df6e93038db0ff.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/div_rtn.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/im2col.cuh> #include <ATen/native/im2col_shape_check.h> namespace at { namespace native { namespace { static void im2col_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TORCH_CHECK( kernel_size.size() == 2, "It is expected kernel_size equals to 2, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 2, "It is expected dilation equals to 2, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 2, "It is expected padding equals to 2, but got size ", padding.size()); TORCH_CHECK( stride.size() == 2, "It is expected stride equals to 2, but got size ", stride.size()); int64_t kernel_height = kernel_size[0]; int64_t kernel_width = kernel_size[1]; int64_t dilation_height = dilation[0]; int64_t dilation_width = dilation[1]; int64_t pad_height = padding[0]; int64_t pad_width = padding[1]; int64_t stride_height = stride[0]; int64_t stride_width = stride[1]; TensorArg input_arg{input_, "input", 1}; TensorArg output_arg{output, "output", 2}; checkAllSameGPU(__func__, {input_arg, output_arg}); im2col_shape_check( input_, Tensor(), kernel_height, kernel_width, dilation_height, dilation_width, pad_height, pad_width, stride_height, stride_width); Tensor input = input_.contiguous(); bool batched_input = true; if (input.dim() == 3) { batched_input = false; input.resize_({1, input.size(0), input.size(1), input.size(2)}); } int64_t batch_size = input.size(0); int64_t n_input_plane = input.size(1); int64_t input_height = input.size(2); int64_t input_width = input.size(3); int64_t output_height = (input_height + 2 * pad_height - (dilation_height * (kernel_height - 1) + 1)) / stride_height + 1; int64_t output_width = (input_width + 2 * pad_width - (dilation_width * (kernel_width - 1) + 1)) / stride_width + 1; int64_t n_output_plane = n_input_plane * kernel_width * kernel_height; int64_t output_length = output_height * output_width; output.resize_({batch_size, n_output_plane, output_length}); output.zero_(); // Launch kernel AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "im2col_out_cuda", [&] { Tensor input_n; Tensor output_n; for (int64_t elt = 0; elt < batch_size; elt++) { input_n = input.select(0, elt); output_n = output.select(0, elt); im2col<scalar_t>( at::cuda::getCurrentCUDAStream(), input_n.data_ptr<scalar_t>(), n_input_plane, input_height, input_width, output_height, output_width, kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); } if (!batched_input) { output.resize_({n_output_plane, output_length}); } }); } static void im2col_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { TORCH_CHECK( input_size.size() == 2, "It is expected input_size equals to 2, but got size ", input_size.size()); // col2im_out_cuda checks size of kernel_size, dilation, padding and stride at::native::col2im_out_cuda( grad_output, input_size, kernel_size, dilation, padding, stride, grad_input); } } // namespace Tensor& im2col_out_cuda(const Tensor& input, IntArrayRef kernel_size, 
IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& output) { im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } Tensor im2col_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); im2col_out_cuda_template( output, input, kernel_size, dilation, padding, stride); return output; } Tensor& im2col_backward_out_cuda(const Tensor& grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor& grad_input) { im2col_backward_out_cuda_template( grad_input, grad_output, input_size, kernel_size, dilation, padding, stride); return grad_input; } Tensor im2col_backward_cuda( const Tensor& grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); im2col_backward_out_cuda_template( grad_input, grad_output, input_size, kernel_size, dilation, padding, stride); return grad_input; } } // namespace native } // namespace at
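Note on the im2col pair above (editorial, not part of either file): for ATen code, hipify also remaps the ATen-internal headers and the current-stream accessor, while the dispatch macros and tensor logic stay identical. The mappings below are taken verbatim from the two files above; nothing new is introduced:

// headers:  <ATen/cuda/CUDAContext.h>        -> <ATen/hip/HIPContext.h>
//           <ATen/cuda/CUDAApplyUtils.cuh>   -> <ATen/hip/HIPApplyUtils.cuh>
//           <ATen/native/cuda/im2col.cuh>    -> <ATen/native/hip/im2col.cuh>
// stream:   at::cuda::getCurrentCUDAStream() -> at::hip::getCurrentHIPStreamMasqueradingAsCUDA()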
train.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> // need to add -lcurand to nvcc flags #include <rocblas.h> // need to add -lcublas to nvcc flags #include "assert.h" #include "train.cuh" #include "error.cuh" #include "poisson.cuh" #include "polyaurn.cuh" #include "random_hip.cuh" #include "spalias.cuh" #include "topics.cuh" #include "tuning.cuh" namespace gpulda { // global variables Args* args; // externally visible f32* Phi_dense; u32* n_dense; f32* Phi_temp; Poisson* pois; SpAlias* alias; f32* sigma_a; u32* C; hiprandStatePhilox4_32_10_t* Phi_rng; hipStream_t* Phi_stream; hipblasHandle_t* cublas_handle; f32* d_one; f32* d_zero; extern "C" void initialize(Args* init_args, Buffer* buffers, u32 n_buffers) { // set the pointer to args struct args = init_args; // set heap size for hashmaps size_t heap_size; size_t minimum_heap_size = ((size_t) args->max_D) * ((size_t) GPULDA_D_HEAP_SIZE) * ((size_t) 2); hipDeviceGetLimit(&heap_size, hipLimitMallocHeapSize) >> GPULDA_CHECK; if(heap_size < minimum_heap_size) { hipDeviceSetLimit(hipLimitMallocHeapSize, minimum_heap_size) >> GPULDA_CHECK; hipDeviceGetLimit(&heap_size, hipLimitMallocHeapSize) >> GPULDA_CHECK; if(heap_size < minimum_heap_size) { hipErrorMemoryAllocation >> GPULDA_CHECK; } } // allocate and initialize cuBLAS cublas_handle = new hipblasHandle_t; hipblasCreate(cublas_handle) >> GPULDA_CHECK; hipblasSetPointerMode(*cublas_handle, HIPBLAS_POINTER_MODE_DEVICE) >> GPULDA_CHECK; f32 h_zero = 0.0f; hipMalloc(&d_zero, sizeof(f32)) >> GPULDA_CHECK; hipMemcpy(d_zero, &h_zero, sizeof(f32), hipMemcpyHostToDevice) >> GPULDA_CHECK; f32 h_one = 1.0f; hipMalloc(&d_one, sizeof(f32)) >> GPULDA_CHECK; hipMemcpy(d_one, &h_one, sizeof(f32), hipMemcpyHostToDevice) >> GPULDA_CHECK; hipMalloc(&Phi_temp, args->K * args->V * sizeof(f32)) >> GPULDA_CHECK; // allocate and initialize cuRAND hipMalloc(&Phi_rng, sizeof(hiprandStatePhilox4_32_10_t)) >> GPULDA_CHECK; hipLaunchKernelGGL(( rng_init), dim3(1),dim3(1), 0, 0, 0, 0, Phi_rng); hipDeviceSynchronize() >> GPULDA_CHECK; // allocate and initialize streams Phi_stream = new hipStream_t; hipStreamCreate(Phi_stream) >> GPULDA_CHECK; // allocate memory for buffers for(i32 i = 0; i < n_buffers; ++i) { buffers[i].stream = new hipStream_t; hipStreamCreate(buffers[i].stream) >> GPULDA_CHECK; hipMalloc(&buffers[i].gpu_z, args->buffer_size * sizeof(u32)) >> GPULDA_CHECK; hipMalloc(&buffers[i].gpu_w, args->buffer_size * sizeof(u32)) >> GPULDA_CHECK; hipMalloc(&buffers[i].gpu_d_len, args->max_D * sizeof(u32)) >> GPULDA_CHECK; hipMalloc(&buffers[i].gpu_d_idx, args->max_D * sizeof(u32)) >> GPULDA_CHECK; hipMalloc(&buffers[i].gpu_K_d, args->max_D * sizeof(u32)) >> GPULDA_CHECK; hipMalloc(&buffers[i].gpu_rng, sizeof(hiprandStatePhilox4_32_10_t)) >> GPULDA_CHECK; hipLaunchKernelGGL(( rng_init), dim3(1),dim3(1), 0, 0, 0, i + 1, buffers[i].gpu_rng); hipDeviceSynchronize() >> GPULDA_CHECK; } // allocate globals hipMalloc(&Phi_dense, args->K * args->V * sizeof(f32)) >> GPULDA_CHECK; hipMalloc(&n_dense, args->K * args->V * sizeof(u32)) >> GPULDA_CHECK; pois = new Poisson(GPULDA_POIS_MAX_LAMBDA, GPULDA_POIS_MAX_VALUE, args->beta); alias = new SpAlias(args->V, args->K); hipMalloc(&sigma_a,args->V * sizeof(f32)) >> GPULDA_CHECK; hipMalloc(&C,args->V * sizeof(u32)) >> GPULDA_CHECK; hipMemcpy(C, args->C, args->V * sizeof(u32), hipMemcpyHostToDevice) >> GPULDA_CHECK; // run device init code hipLaunchKernelGGL(( polya_urn_init), 
dim3(args->K),dim3(GPULDA_POLYA_URN_SAMPLE_BLOCKDIM), 0, 0, n_dense, C, args->K, args->beta, args->V, pois->pois_alias->prob, pois->pois_alias->alias, pois->max_lambda, pois->max_value, Phi_rng); hipDeviceSynchronize() >> GPULDA_CHECK; hipLaunchKernelGGL(( rng_advance), dim3(1),dim3(1), 0, 0, args->K*args->V,Phi_rng); hipDeviceSynchronize() >> GPULDA_CHECK; } extern "C" void cleanup(Buffer* buffers, u32 n_buffers) { // deallocate globals hipFree(C) >> GPULDA_CHECK; hipFree(sigma_a) >> GPULDA_CHECK; delete alias; delete pois; hipFree(n_dense) >> GPULDA_CHECK; hipFree(Phi_dense) >> GPULDA_CHECK; // deallocate memory for buffers for(i32 i = 0; i < n_buffers; ++i) { hipFree(buffers[i].gpu_z) >> GPULDA_CHECK; hipFree(buffers[i].gpu_w) >> GPULDA_CHECK; hipFree(buffers[i].gpu_d_len) >> GPULDA_CHECK; hipFree(buffers[i].gpu_d_idx) >> GPULDA_CHECK; hipFree(buffers[i].gpu_K_d) >> GPULDA_CHECK; hipFree(buffers[i].gpu_rng) >> GPULDA_CHECK; hipStreamDestroy(*buffers[i].stream) >> GPULDA_CHECK; delete buffers[i].stream; } // deallocate streams hipStreamDestroy(*Phi_stream) >> GPULDA_CHECK; delete Phi_stream; // deallocate cuRAND hipFree(Phi_rng) >> GPULDA_CHECK; // deallocate cuBLAS hipFree(Phi_temp) >> GPULDA_CHECK; hipFree(d_zero) >> GPULDA_CHECK; hipFree(d_one) >> GPULDA_CHECK; hipblasDestroy(*cublas_handle) >> GPULDA_CHECK; delete cublas_handle; // remove the args pointer args = NULL; } extern "C" void sample_phi() { // draw Phi ~ PPU(n + beta) hipLaunchKernelGGL(( polya_urn_sample), dim3(args->K),dim3(GPULDA_POLYA_URN_SAMPLE_BLOCKDIM),0,*Phi_stream, Phi_dense, n_dense, args->beta, args->V, pois->pois_alias->prob, pois->pois_alias->alias, pois->max_lambda, pois->max_value, Phi_rng); hipLaunchKernelGGL(( rng_advance), dim3(1),dim3(1),0,*Phi_stream, args->K*args->V,Phi_rng); // copy Phi for transpose, set the stream, then transpose Phi polya_urn_transpose(Phi_stream, Phi_dense, Phi_temp, args->K, args->V, cublas_handle, d_zero, d_one); // compute sigma_a and alias probabilities hipLaunchKernelGGL(( polya_urn_colsums), dim3(args->V),dim3(GPULDA_POLYA_URN_COLSUMS_BLOCKDIM),0,*Phi_stream, Phi_dense, sigma_a, args->alpha, alias->prob, args->K); // build Alias tables hipLaunchKernelGGL(( build_alias), dim3(args->V),dim3(32),2*next_pow2(args->K)*sizeof(i32), *Phi_stream, alias->prob, alias->alias, args->K); // reset sufficient statistics for n hipLaunchKernelGGL(( polya_urn_reset), dim3(args->K),dim3(128),0,*Phi_stream, n_dense, args->V); // don't return until operations completed hipStreamSynchronize(*Phi_stream) >> GPULDA_CHECK; } extern "C" void sample_z_async(Buffer* buffer) { // copy z,w,d to GPU and compute d_idx based on document length hipMemcpyAsync(buffer->gpu_z, buffer->z, buffer->n_tokens*sizeof(u32), hipMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; hipMemcpyAsync(buffer->gpu_w, buffer->w, buffer->n_tokens*sizeof(u32), hipMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; hipMemcpyAsync(buffer->gpu_d_len, buffer->d, buffer->n_docs*sizeof(u32), hipMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; hipMemcpyAsync(buffer->gpu_K_d, buffer->K_d, buffer->n_docs*sizeof(u32), hipMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; hipLaunchKernelGGL(( compute_d_idx), dim3(1),dim3(GPULDA_COMPUTE_D_IDX_BLOCKDIM),0,*buffer->stream, buffer->gpu_d_len, buffer->gpu_d_idx, buffer->n_docs); // sample the topic indicators hipLaunchKernelGGL(( sample_topics), dim3(buffer->n_docs),dim3(GPULDA_SAMPLE_TOPICS_BLOCKDIM),0,*buffer->stream, args->buffer_size, buffer->gpu_z, buffer->gpu_w, buffer->gpu_d_len, 
buffer->gpu_d_idx, buffer->gpu_K_d, args->V, n_dense, Phi_dense, sigma_a, alias->prob, alias->alias, alias->table_size, buffer->gpu_rng); hipLaunchKernelGGL(( rng_advance), dim3(1),dim3(1),0,*buffer->stream, 2*buffer->n_tokens,Phi_rng); // copy z back to host hipMemcpyAsync(buffer->z, buffer->gpu_z, buffer->n_tokens*sizeof(u32), hipMemcpyDeviceToHost,*buffer->stream) >> GPULDA_CHECK; } extern "C" void sync_buffer(Buffer *buffer) { // return when stream has finished hipStreamSynchronize(*buffer->stream) >> GPULDA_CHECK; } }
train.cu
#include <cuda_runtime.h> #include <curand_kernel.h> // need to add -lcurand to nvcc flags #include <cublas_v2.h> // need to add -lcublas to nvcc flags #include "assert.h" #include "train.cuh" #include "error.cuh" #include "poisson.cuh" #include "polyaurn.cuh" #include "random.cuh" #include "spalias.cuh" #include "topics.cuh" #include "tuning.cuh" namespace gpulda { // global variables Args* args; // externally visible f32* Phi_dense; u32* n_dense; f32* Phi_temp; Poisson* pois; SpAlias* alias; f32* sigma_a; u32* C; curandStatePhilox4_32_10_t* Phi_rng; cudaStream_t* Phi_stream; cublasHandle_t* cublas_handle; f32* d_one; f32* d_zero; extern "C" void initialize(Args* init_args, Buffer* buffers, u32 n_buffers) { // set the pointer to args struct args = init_args; // set heap size for hashmaps size_t heap_size; size_t minimum_heap_size = ((size_t) args->max_D) * ((size_t) GPULDA_D_HEAP_SIZE) * ((size_t) 2); cudaDeviceGetLimit(&heap_size, cudaLimitMallocHeapSize) >> GPULDA_CHECK; if(heap_size < minimum_heap_size) { cudaDeviceSetLimit(cudaLimitMallocHeapSize, minimum_heap_size) >> GPULDA_CHECK; cudaDeviceGetLimit(&heap_size, cudaLimitMallocHeapSize) >> GPULDA_CHECK; if(heap_size < minimum_heap_size) { cudaErrorMemoryAllocation >> GPULDA_CHECK; } } // allocate and initialize cuBLAS cublas_handle = new cublasHandle_t; cublasCreate(cublas_handle) >> GPULDA_CHECK; cublasSetPointerMode(*cublas_handle, CUBLAS_POINTER_MODE_DEVICE) >> GPULDA_CHECK; f32 h_zero = 0.0f; cudaMalloc(&d_zero, sizeof(f32)) >> GPULDA_CHECK; cudaMemcpy(d_zero, &h_zero, sizeof(f32), cudaMemcpyHostToDevice) >> GPULDA_CHECK; f32 h_one = 1.0f; cudaMalloc(&d_one, sizeof(f32)) >> GPULDA_CHECK; cudaMemcpy(d_one, &h_one, sizeof(f32), cudaMemcpyHostToDevice) >> GPULDA_CHECK; cudaMalloc(&Phi_temp, args->K * args->V * sizeof(f32)) >> GPULDA_CHECK; // allocate and initialize cuRAND cudaMalloc(&Phi_rng, sizeof(curandStatePhilox4_32_10_t)) >> GPULDA_CHECK; rng_init<<<1,1>>>(0, 0, Phi_rng); cudaDeviceSynchronize() >> GPULDA_CHECK; // allocate and initialize streams Phi_stream = new cudaStream_t; cudaStreamCreate(Phi_stream) >> GPULDA_CHECK; // allocate memory for buffers for(i32 i = 0; i < n_buffers; ++i) { buffers[i].stream = new cudaStream_t; cudaStreamCreate(buffers[i].stream) >> GPULDA_CHECK; cudaMalloc(&buffers[i].gpu_z, args->buffer_size * sizeof(u32)) >> GPULDA_CHECK; cudaMalloc(&buffers[i].gpu_w, args->buffer_size * sizeof(u32)) >> GPULDA_CHECK; cudaMalloc(&buffers[i].gpu_d_len, args->max_D * sizeof(u32)) >> GPULDA_CHECK; cudaMalloc(&buffers[i].gpu_d_idx, args->max_D * sizeof(u32)) >> GPULDA_CHECK; cudaMalloc(&buffers[i].gpu_K_d, args->max_D * sizeof(u32)) >> GPULDA_CHECK; cudaMalloc(&buffers[i].gpu_rng, sizeof(curandStatePhilox4_32_10_t)) >> GPULDA_CHECK; rng_init<<<1,1>>>(0, i + 1, buffers[i].gpu_rng); cudaDeviceSynchronize() >> GPULDA_CHECK; } // allocate globals cudaMalloc(&Phi_dense, args->K * args->V * sizeof(f32)) >> GPULDA_CHECK; cudaMalloc(&n_dense, args->K * args->V * sizeof(u32)) >> GPULDA_CHECK; pois = new Poisson(GPULDA_POIS_MAX_LAMBDA, GPULDA_POIS_MAX_VALUE, args->beta); alias = new SpAlias(args->V, args->K); cudaMalloc(&sigma_a,args->V * sizeof(f32)) >> GPULDA_CHECK; cudaMalloc(&C,args->V * sizeof(u32)) >> GPULDA_CHECK; cudaMemcpy(C, args->C, args->V * sizeof(u32), cudaMemcpyHostToDevice) >> GPULDA_CHECK; // run device init code polya_urn_init<<<args->K,GPULDA_POLYA_URN_SAMPLE_BLOCKDIM>>>(n_dense, C, args->K, args->beta, args->V, pois->pois_alias->prob, pois->pois_alias->alias, pois->max_lambda, pois->max_value, Phi_rng); 
cudaDeviceSynchronize() >> GPULDA_CHECK; rng_advance<<<1,1>>>(args->K*args->V,Phi_rng); cudaDeviceSynchronize() >> GPULDA_CHECK; } extern "C" void cleanup(Buffer* buffers, u32 n_buffers) { // deallocate globals cudaFree(C) >> GPULDA_CHECK; cudaFree(sigma_a) >> GPULDA_CHECK; delete alias; delete pois; cudaFree(n_dense) >> GPULDA_CHECK; cudaFree(Phi_dense) >> GPULDA_CHECK; // deallocate memory for buffers for(i32 i = 0; i < n_buffers; ++i) { cudaFree(buffers[i].gpu_z) >> GPULDA_CHECK; cudaFree(buffers[i].gpu_w) >> GPULDA_CHECK; cudaFree(buffers[i].gpu_d_len) >> GPULDA_CHECK; cudaFree(buffers[i].gpu_d_idx) >> GPULDA_CHECK; cudaFree(buffers[i].gpu_K_d) >> GPULDA_CHECK; cudaFree(buffers[i].gpu_rng) >> GPULDA_CHECK; cudaStreamDestroy(*buffers[i].stream) >> GPULDA_CHECK; delete buffers[i].stream; } // deallocate streams cudaStreamDestroy(*Phi_stream) >> GPULDA_CHECK; delete Phi_stream; // deallocate cuRAND cudaFree(Phi_rng) >> GPULDA_CHECK; // deallocate cuBLAS cudaFree(Phi_temp) >> GPULDA_CHECK; cudaFree(d_zero) >> GPULDA_CHECK; cudaFree(d_one) >> GPULDA_CHECK; cublasDestroy(*cublas_handle) >> GPULDA_CHECK; delete cublas_handle; // remove the args pointer args = NULL; } extern "C" void sample_phi() { // draw Phi ~ PPU(n + beta) polya_urn_sample<<<args->K,GPULDA_POLYA_URN_SAMPLE_BLOCKDIM,0,*Phi_stream>>>(Phi_dense, n_dense, args->beta, args->V, pois->pois_alias->prob, pois->pois_alias->alias, pois->max_lambda, pois->max_value, Phi_rng); rng_advance<<<1,1,0,*Phi_stream>>>(args->K*args->V,Phi_rng); // copy Phi for transpose, set the stream, then transpose Phi polya_urn_transpose(Phi_stream, Phi_dense, Phi_temp, args->K, args->V, cublas_handle, d_zero, d_one); // compute sigma_a and alias probabilities polya_urn_colsums<<<args->V,GPULDA_POLYA_URN_COLSUMS_BLOCKDIM,0,*Phi_stream>>>(Phi_dense, sigma_a, args->alpha, alias->prob, args->K); // build Alias tables build_alias<<<args->V,32,2*next_pow2(args->K)*sizeof(i32), *Phi_stream>>>(alias->prob, alias->alias, args->K); // reset sufficient statistics for n polya_urn_reset<<<args->K,128,0,*Phi_stream>>>(n_dense, args->V); // don't return until operations completed cudaStreamSynchronize(*Phi_stream) >> GPULDA_CHECK; } extern "C" void sample_z_async(Buffer* buffer) { // copy z,w,d to GPU and compute d_idx based on document length cudaMemcpyAsync(buffer->gpu_z, buffer->z, buffer->n_tokens*sizeof(u32), cudaMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; cudaMemcpyAsync(buffer->gpu_w, buffer->w, buffer->n_tokens*sizeof(u32), cudaMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; cudaMemcpyAsync(buffer->gpu_d_len, buffer->d, buffer->n_docs*sizeof(u32), cudaMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; cudaMemcpyAsync(buffer->gpu_K_d, buffer->K_d, buffer->n_docs*sizeof(u32), cudaMemcpyHostToDevice,*buffer->stream) >> GPULDA_CHECK; compute_d_idx<<<1,GPULDA_COMPUTE_D_IDX_BLOCKDIM,0,*buffer->stream>>>(buffer->gpu_d_len, buffer->gpu_d_idx, buffer->n_docs); // sample the topic indicators sample_topics<<<buffer->n_docs,GPULDA_SAMPLE_TOPICS_BLOCKDIM,0,*buffer->stream>>>(args->buffer_size, buffer->gpu_z, buffer->gpu_w, buffer->gpu_d_len, buffer->gpu_d_idx, buffer->gpu_K_d, args->V, n_dense, Phi_dense, sigma_a, alias->prob, alias->alias, alias->table_size, buffer->gpu_rng); rng_advance<<<1,1,0,*buffer->stream>>>(2*buffer->n_tokens,Phi_rng); // copy z back to host cudaMemcpyAsync(buffer->z, buffer->gpu_z, buffer->n_tokens*sizeof(u32), cudaMemcpyDeviceToHost,*buffer->stream) >> GPULDA_CHECK; } extern "C" void sync_buffer(Buffer *buffer) { // return when 
stream has finished cudaStreamSynchronize(*buffer->stream) >> GPULDA_CHECK; } }
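The initialization, cleanup, and sampling code above pushes every cudaError_t (and the other status codes) through the ">> GPULDA_CHECK" idiom declared in error.cuh, which is not part of this listing. As a rough sketch only — the names and layout below are assumptions, not the project's actual error.cuh — an operator-overload check of that shape could look like this:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for the tag used by error.cuh; the real header is not shown in this dump.
struct gpulda_check_tag { const char* file; int line; };
#define GPULDA_CHECK (gpulda_check_tag{__FILE__, __LINE__})

// Streaming a cudaError_t into the tag aborts the process on any failure.
inline void operator>>(cudaError_t rc, gpulda_check_tag t) {
  if (rc != cudaSuccess) {
    std::fprintf(stderr, "%s:%d: %s\n", t.file, t.line, cudaGetErrorString(rc));
    std::exit(EXIT_FAILURE);
  }
}

Since the file also applies the same idiom to cublasCreate and similar calls, the real header presumably provides additional overloads for cublasStatus_t and the other status enums it checks.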
8a75b0a07bd72c1b74ac4c37d310d5ad1c2a335d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include "common.h" /** * Gear things up as a send - see if we can actually do better with having * GPUs in a push/pull config instead of just pull */ int main(int argc, char *argv[]) { const int nDevs = 2; // make sure we have enough devices int numDevices; CUDACHECK(hipGetDeviceCount(&numDevices)); if (numDevices < nDevs) { std::cout << "not enough devices" << std::endl; exit(EXIT_FAILURE); } // some arg parsing const size_t N = atoi(argv[1]); int nStreams = 1; if (argc > 2) nStreams = atoi(argv[2]); // divide among devs and then streams int devChunkSize = N / nDevs; int streamChunkSize = devChunkSize / nStreams; tx_type *h_data; CUDACHECK(hipHostMalloc(&h_data, N * sizeof(tx_type))); CUDACHECK(hipMemset(h_data, 1., N * sizeof(tx_type))); tx_type *d_data[nDevs]; hipEvent_t *start[nDevs]; hipEvent_t *stop[nDevs]; hipStream_t **s = (hipStream_t**)malloc(nDevs * sizeof(hipStream_t*)); for (int device = 0; device < nDevs; ++device) { CUDACHECK(hipSetDevice(device)); CUDACHECK(hipMalloc(&d_data[device], N * sizeof(tx_type))); s[device] = (hipStream_t*)malloc(nStreams * sizeof(hipStream_t)); start[device] = (hipEvent_t*)malloc(nStreams * sizeof(hipStream_t)); stop[device] = (hipEvent_t*)malloc(nStreams * sizeof(hipStream_t)); for (int i = 0; i < nStreams; ++i) { CUDACHECK(hipStreamCreate(s[device]+i)); CUDACHECK(hipEventCreate(start[device]+i)); CUDACHECK(hipEventCreate(stop[device]+i)); } } CUDACHECK(hipMemcpy(d_data[0], h_data, N * sizeof(tx_type), hipMemcpyDefault)); // do the copy // how many ways can I split this up two ways??? int fDev = 0, fStream = 0, fEvent = 0; int sDev = 1, sStream = 0, sEvent = 0; CUDACHECK(hipSetDevice(fDev)); CUDACHECK(hipEventRecord(start[fDev][fEvent], s[fDev][fStream])); CUDACHECK(hipSetDevice(sDev)); CUDACHECK(hipEventRecord(start[sDev][sEvent], s[sDev][sStream])); CUDACHECK(hipSetDevice(fDev)); CUDACHECK(hipMemcpyAsync(d_data[1], d_data[0], devChunkSize * sizeof(tx_type), hipMemcpyDefault, s[fDev][fStream])); CUDACHECK(hipSetDevice(sDev)); CUDACHECK(hipMemcpyAsync(d_data[1] + devChunkSize, d_data[0] + devChunkSize, (N - devChunkSize) * sizeof(tx_type), hipMemcpyDefault, s[sDev][sStream])); CUDACHECK(hipEventRecord(stop[fDev][fEvent], s[fDev][fStream])); CUDACHECK(hipEventRecord(stop[sDev][sEvent], s[sDev][sStream])); // error checking tx_type *h_result; CUDACHECK(hipHostMalloc(&h_result, N * sizeof(tx_type))); CUDACHECK(hipMemcpy(h_result, d_data[1], N * sizeof(tx_type), hipMemcpyDefault)); for (size_t i = 0; i < N; ++i) { if (h_result[i] != h_data[i]) { std::cout << "copy failed" << std::endl; exit(EXIT_FAILURE); } } for (int dev = 0; dev < nDevs; ++dev) { CUDACHECK(hipSetDevice(dev)); CUDACHECK(hipDeviceSynchronize()); } float ms; CUDACHECK(hipEventElapsedTime(&ms, start[fDev][fEvent], stop[fDev][fEvent])); printf("first copy took %f ms\n", ms); CUDACHECK(hipEventElapsedTime(&ms, start[sDev][sEvent], stop[sDev][sEvent])); printf("second copy took %f ms\n", ms); for (int device = 0; device < nDevs; ++device) { CUDACHECK(hipFree(d_data[device])); } CUDACHECK(hipHostFree(h_result)); CUDACHECK(hipHostFree(h_data)); for (int dev = 0; dev < nDevs; ++dev) { free(s[dev]); free(start[dev]); free(stop[dev]); } free(s); return 0; }
8a75b0a07bd72c1b74ac4c37d310d5ad1c2a335d.cu
#include <cuda_runtime.h> #include <iostream> #include "common.h" /** * Gear things up as a send - see if we can actually do better with having * GPUs in a push/pull config instead of just pull */ int main(int argc, char *argv[]) { const int nDevs = 2; // make sure we have enough devices int numDevices; CUDACHECK(cudaGetDeviceCount(&numDevices)); if (numDevices < nDevs) { std::cout << "not enough devices" << std::endl; exit(EXIT_FAILURE); } // some arg parsing const size_t N = atoi(argv[1]); int nStreams = 1; if (argc > 2) nStreams = atoi(argv[2]); // divide among devs and then streams int devChunkSize = N / nDevs; int streamChunkSize = devChunkSize / nStreams; tx_type *h_data; CUDACHECK(cudaMallocHost(&h_data, N * sizeof(tx_type))); CUDACHECK(cudaMemset(h_data, 1., N * sizeof(tx_type))); tx_type *d_data[nDevs]; cudaEvent_t *start[nDevs]; cudaEvent_t *stop[nDevs]; cudaStream_t **s = (cudaStream_t**)malloc(nDevs * sizeof(cudaStream_t*)); for (int device = 0; device < nDevs; ++device) { CUDACHECK(cudaSetDevice(device)); CUDACHECK(cudaMalloc(&d_data[device], N * sizeof(tx_type))); s[device] = (cudaStream_t*)malloc(nStreams * sizeof(cudaStream_t)); start[device] = (cudaEvent_t*)malloc(nStreams * sizeof(cudaStream_t)); stop[device] = (cudaEvent_t*)malloc(nStreams * sizeof(cudaStream_t)); for (int i = 0; i < nStreams; ++i) { CUDACHECK(cudaStreamCreate(s[device]+i)); CUDACHECK(cudaEventCreate(start[device]+i)); CUDACHECK(cudaEventCreate(stop[device]+i)); } } CUDACHECK(cudaMemcpy(d_data[0], h_data, N * sizeof(tx_type), cudaMemcpyDefault)); // do the copy // how many ways can I split this up two ways??? int fDev = 0, fStream = 0, fEvent = 0; int sDev = 1, sStream = 0, sEvent = 0; CUDACHECK(cudaSetDevice(fDev)); CUDACHECK(cudaEventRecord(start[fDev][fEvent], s[fDev][fStream])); CUDACHECK(cudaSetDevice(sDev)); CUDACHECK(cudaEventRecord(start[sDev][sEvent], s[sDev][sStream])); CUDACHECK(cudaSetDevice(fDev)); CUDACHECK(cudaMemcpyAsync(d_data[1], d_data[0], devChunkSize * sizeof(tx_type), cudaMemcpyDefault, s[fDev][fStream])); CUDACHECK(cudaSetDevice(sDev)); CUDACHECK(cudaMemcpyAsync(d_data[1] + devChunkSize, d_data[0] + devChunkSize, (N - devChunkSize) * sizeof(tx_type), cudaMemcpyDefault, s[sDev][sStream])); CUDACHECK(cudaEventRecord(stop[fDev][fEvent], s[fDev][fStream])); CUDACHECK(cudaEventRecord(stop[sDev][sEvent], s[sDev][sStream])); // error checking tx_type *h_result; CUDACHECK(cudaMallocHost(&h_result, N * sizeof(tx_type))); CUDACHECK(cudaMemcpy(h_result, d_data[1], N * sizeof(tx_type), cudaMemcpyDefault)); for (size_t i = 0; i < N; ++i) { if (h_result[i] != h_data[i]) { std::cout << "copy failed" << std::endl; exit(EXIT_FAILURE); } } for (int dev = 0; dev < nDevs; ++dev) { CUDACHECK(cudaSetDevice(dev)); CUDACHECK(cudaDeviceSynchronize()); } float ms; CUDACHECK(cudaEventElapsedTime(&ms, start[fDev][fEvent], stop[fDev][fEvent])); printf("first copy took %f ms\n", ms); CUDACHECK(cudaEventElapsedTime(&ms, start[sDev][sEvent], stop[sDev][sEvent])); printf("second copy took %f ms\n", ms); for (int device = 0; device < nDevs; ++device) { CUDACHECK(cudaFree(d_data[device])); } CUDACHECK(cudaFreeHost(h_result)); CUDACHECK(cudaFreeHost(h_data)); for (int dev = 0; dev < nDevs; ++dev) { free(s[dev]); free(start[dev]); free(stop[dev]); } free(s); return 0; }
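The two-device copy benchmark above relies on cudaMemcpyDefault with cudaMemcpyAsync to pick the transfer path; whether that path is a direct peer-to-peer copy or a staged copy through host memory depends on peer access being available between the two GPUs. This is not part of the benchmark itself, but a sketch of how peer access could be probed and enabled before issuing such copies (device numbering as in the code above) might look like:

#include <cuda_runtime.h>
#include <cstdio>

// Enable bidirectional peer access between two devices when the hardware allows it.
// If it is not available, cudaMemcpyDefault falls back to staging through the host.
void try_enable_peer(int devA, int devB) {
  int aToB = 0, bToA = 0;
  cudaDeviceCanAccessPeer(&aToB, devA, devB);
  cudaDeviceCanAccessPeer(&bToA, devB, devA);
  if (aToB) { cudaSetDevice(devA); cudaDeviceEnablePeerAccess(devB, 0); }
  if (bToA) { cudaSetDevice(devB); cudaDeviceEnablePeerAccess(devA, 0); }
  std::printf("peer access %d->%d: %d, %d->%d: %d\n", devA, devB, aToB, devB, devA, bToA);
}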
d8c495c28473d8d8aa2ad6ce5404452c32fbedff.hip
// !!! This is a file automatically generated by hipify!!! #include "fft_module.cuh" #include "cuda_module.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_functions.h" #include "helper_cuda.h" #include <stdio.h> #include <iostream> #include <hipfft.h> #include <complex> //int main(int argc, char **argv){} //The cufft must be invoked by the host, not as part of a kernel. //num_wins = number of requested frames + averaging -1, this is required to ensure the correct averaging parameters. //a requested frame is an FFT of size = resolution //by the same logic, h_samp_array must be = num_wins * resolution. h_out will be = (num_wins - (averaging-1)) * resolution void perform_fft(std::complex<short>* h_samp_arry, float* h_out, const int resolution, const int averaging, const int num_wins) { /* if (num_wins == 0) { std::cout << "AMG NO WINS!\n"; num_wins = sizeof(h_samp_arry) / (2 * resolution); std::cout << "Number of windows: " << num_wins << std::endl; } */ //const int num_wins = 1; //hipComplex* samp[resolution]; //std::complex<short>* d_samp; hipError_t cudaStatus; hipfftResult fftStatus; //Create cufft plan, turns out cufft handles its own memory transfers, so we must use callbacks in order to avoid numerous reads and writes in the device //Will however use multiple kernels initially, then see what the performance improvement is with callbacks at a later stage. n.n hipfftHandle plan; fftStatus = hipfftPlan1d(&plan, resolution, HIPFFT_C2C, (num_wins + averaging - 1)); //is deprecated //int n[1] = { resolution }; //fftStatus = hipfftPlanMany(&plan, 1, n, // NULL, 1, resolution, /// NULL, 1, resolution, // HIPFFT_C2C, (num_wins + averaging - 1)); if (fftStatus != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error creating plan: %d\n", fftStatus); goto Error; } // for outputting of averaged and processed samples / float* d_out; //cast stl complex to cuda complex cuComplexShort* h_samp_ptr = (cuComplexShort*)&h_samp_arry[0]; //std::cout << h_samp_arry[0].real() << "," << h_samp_arry[0].imag() << " cuCmplx" << h_samp_ptr[0].x << "," << h_samp_ptr[0].y << std::endl; float* h_coef; h_coef = (float*)malloc(sizeof(float)*resolution); float* d_coef; cuComplexShort* d_samp; hipComplex* d_fftbuff; float win_power = 0; int rx_gain = 30; //Create coefficient array and x axis index for plotting for (int i = 0; i < resolution; i++) { h_coef[i] = 0.35875 - 0.48829*cos(2 * pi*i / (resolution - 1)) + 0.14128*cos(4 * pi*i / (resolution - 1)) - 0.01168*cos(6 * pi*i / (resolution - 1)); //blackmann harris window win_power += (h_coef[i] * h_coef[i]); //this computes the total window power and normalises it to account for DC gain due to the window. } win_power /= resolution; //normalise the total window power across each sample. const float offset = 10 - rx_gain + 10 * std::log10(win_power); //10 is the MAX power detected by the ADC and take into account the gain of the frontend. //printf("GPU Offset: %f", offset); cuda_memcheck(); //allocate the memory for the GPU cudaStatus = hipMalloc((cuComplexShort**)&d_samp, sizeof(cuComplexShort)* resolution*(num_wins + averaging - 1)); if (cudaStatus != hipSuccess) { fprintf(stderr, "d_samp hipMalloc failed! %s", hipGetErrorString(cudaStatus)); goto Error; } cuda_memcheck(); cudaStatus = hipMalloc((float**)&d_coef, sizeof(float)*resolution); if (cudaStatus != hipSuccess) { fprintf(stderr, "d_coef hipMalloc failed! 
%s", hipGetErrorString(cudaStatus)); goto Error; } cuda_memcheck(); cudaStatus = hipMalloc((hipComplex**)&d_fftbuff, sizeof(hipComplex)*resolution*(num_wins + averaging - 1)); if (cudaStatus != hipSuccess) { fprintf(stderr, "d_fftbuff hipMalloc failed! %s", hipGetErrorString(cudaStatus)); goto Error; } cuda_memcheck(); //Transfer data to GPU cudaStatus = hipMemcpy(d_coef, h_coef, sizeof(float)*resolution, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy to Device failed! %s", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipMemcpy(d_samp, h_samp_ptr, sizeof(cuComplexShort)*resolution*(num_wins + averaging - 1), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy to Device failed! %s", hipGetErrorString(cudaStatus)); goto Error; } hipLaunchKernelGGL(( cufft_prep) , dim3((resolution*num_wins) / CU_THD), dim3(CU_THD >> > (d_fftbuff, d_samp, d_coef, (num_wins + averaging - 1), resolution); //This will create (WIN_SAMPS*num_wins)/CU_THD blocks), with 1024 threads per block checkCudaErrors(hipFree(d_samp)); checkCudaErrors(hipFree(d_coef)); //inplace fft fftStatus = hipfftExecC2C(plan, d_fftbuff, d_fftbuff, HIPFFT_FORWARD); if (fftStatus != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: ExecC2C Forward failed %d\n", fftStatus); goto Error; } cudaStatus = hipMalloc((float**)&d_out, sizeof(float)*resolution * num_wins); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! %s", hipGetErrorString(cudaStatus)); goto Error; } hipMemset(d_out, 0, sizeof(float)*resolution * num_wins); //initialise to zero //Do something with the fft'd samples, like average them, then output them to the host, where the host can perform detection. avg_out , resolution / CU_THD, CU_THD , 0, 0, 0, d_out, d_fftbuff, num_wins, averaging, offset, resolution); hipLaunchKernelGGL(( filter) , dim3(resolution / CU_THD), dim3(CU_THD) , 0, 0, d_out, num_wins, resolution); //As this uses the correct moving average, num_wins does not have to be divided out cudaStatus = hipMemcpy(h_out, d_out, sizeof(float)*resolution * num_wins, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy to Host failed! %s", hipGetErrorString(cudaStatus)); goto Error; } Error: hipfftDestroy(plan); checkCudaErrors(hipFree(d_out)); //checkCudaErrors(hipFree(d_samp)); //checkCudaErrors(hipFree(d_coef)); checkCudaErrors(hipFree(d_fftbuff)); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed! 
%s", hipGetErrorString(cudaStatus)); } //return h_out; } //Kernel Call //https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-use-cufft-callbacks-custom-data-processing/ for inspiration static __global__ void cufft_prep(hipComplex* d_fft, cuComplexShort* d_s, float* d_w, const int num_wins, const int resolution) { int idx = threadIdx.x; //blockDim = number of threads in a block //This will take an array of complex shorts (14b samples) an array of hipComplex and a window array, will convert the com_short to hipComplex (com_float), correctly scale the samples and apply the appropriate window prepping it for fft for (int i = blockIdx.x * blockDim.x + idx; i < resolution*num_wins; i += blockDim.x * gridDim.x){ d_fft[i].x = (d_s[i].x*1.0f / 32767.0f) * d_w[i%resolution]; d_fft[i].y = (d_s[i].y*1.0f / 32767.0f) * d_w[i%resolution]; } //if(idx == 0) printf("d_s[%d]: %f,%f fftbuff %f,%f\n", idx, d_s[idx].x, d_s[idx].y, d_s[idx].x, d_s[idx].x); } static __global__ void filter(float*out, const int num_wins, const int resolution){ int idx = threadIdx.x; int stride = blockDim.x * gridDim.x; float* out_ptr = &out[0]; const int fiveby_filter_level = 13; //normally 16 for 5x5, 13 for aggressive. const int filter_level = 5; // 3x3 kernel bool FIVEBY = true; //for use later //increment loop by 1, and decrease total run by 1 to accomodate for edges of the kernel if (!FIVEBY){ for (int i = (blockIdx.x * blockDim.x + idx) + stride; i < resolution*(num_wins - 1); i += stride){ if (out_ptr[i] == 0 && (blockIdx.x + idx != 0 || blockIdx.x + idx != resolution - 1)){ if ((out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride]) > filter_level){ out_ptr[i] = 1; } } } } if (FIVEBY){ //special case code for handling the beginning and end of the image, note that edges are ignored as they are significantly less impactful on window generation for (int i = blockIdx.x * blockDim.x + idx, j = 0; i < stride*(num_wins); i += stride, j++){ if (out_ptr[i] == 0 && (blockIdx.x * blockDim.x + idx != 0 && blockIdx.x * blockDim.x + idx != resolution - 1 && blockIdx.x * blockDim.x + idx != 1 && blockIdx.x * blockDim.x + idx != resolution - 2)){ if (j == 0){ if (( //unrolled here for efficiencies out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride] + out_ptr[i - 2 + 2 * stride] + out_ptr[i - 1 + 2 * stride] + out_ptr[i + 2 * stride] + out_ptr[i + 1 + 2 * stride] + out_ptr[i + 2 + 2 * stride]) > fiveby_filter_level - 6) { out_ptr[i] = 1; } } else if (j == 1){ if (( //unrolled here for efficiencies out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride] + out_ptr[i - 2 + 2 * stride] + out_ptr[i - 1 + 2 * stride] + out_ptr[i + 2 * stride] + out_ptr[i + 1 + 2 * stride] + out_ptr[i + 2 + 2 * stride]) > fiveby_filter_level - 3) { out_ptr[i] = 1; } } else if (j >= 2 && j < num_wins - 2){ if (( //unrolled here for efficiencies out_ptr[i - 2 - 2 * stride] + out_ptr[i - 1 - 2 * stride] + out_ptr[i - 2 * stride] + out_ptr[i + 1 - 2 * stride] + out_ptr[i + 2 - 2 * stride] + out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + 
out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride] + out_ptr[i - 2 + 2 * stride] + out_ptr[i - 1 + 2 * stride] + out_ptr[i + 2 * stride] + out_ptr[i + 1 + 2 * stride] + out_ptr[i + 2 + 2 * stride]) > fiveby_filter_level) { out_ptr[i] = 1; } } else if (j == num_wins - 2){ if (( //unrolled here for efficiencies, this isnt called ... bug out_ptr[i - 2 - 2 * stride] + out_ptr[i - 1 - 2 * stride] + out_ptr[i - 2 * stride] + out_ptr[i + 1 - 2 * stride] + out_ptr[i + 2 - 2 * stride] + out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride]) > fiveby_filter_level - 3) { out_ptr[i] = 1; } } else if (j == num_wins - 1){ if (( //unrolled here for efficiencies, neither is this ... BUG!!!!! out_ptr[i - 2 - 2 * stride] + out_ptr[i - 1 - 2 * stride] + out_ptr[i - 2 * stride] + out_ptr[i + 1 - 2 * stride] + out_ptr[i + 2 - 2 * stride] + out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2]) > fiveby_filter_level - 6) { out_ptr[i] = 1; } } } } } } static __global__ void avg_out(float* out, hipComplex* d_fft, const int num_wins, const int averaging, const float offset, const int resolution) { //Need to modify for appropriate averaging output int idx = threadIdx.x; float* out_ptr = &out[0]; hipComplex* d_fft_ptr = &d_fft[0]; const float threshold = -96; bool THRESHOLD = true; for (int j = 0; j < num_wins; j++){ //what about the final set of frames? They should be retained and re-computed to maintain accurate averaging ... for (int i = blockIdx.x * blockDim.x + idx; i < resolution*averaging; i += blockDim.x * gridDim.x){ //Moving average of each output bin according to the 'averaging' value - typically set to 10 out_ptr[((resolution / 2) + i) % resolution] += ( 10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / resolution) //DFT bin magnitude ); } // __syncthreads(); if (THRESHOLD){ out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = ((out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset) <= threshold) ? 
1 : 0; } else { out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = (out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset); } // if (out_ptr[blockIdx.x * blockDim.x + idx] <= threshold) out_ptr[blockIdx.x * blockDim.x + idx] = 1; // elseP out_ptr[blockIdx.x * blockDim.x + idx] = 0; out_ptr += resolution; //increment out_ptr by one frame of averages d_fft_ptr += resolution; //increment d_fft_ptr by one frame to maintain rolling average } } /* DEPRECATED static __global__ void avg_out_filter(float* out, hipComplex* d_fft, const int num_wins, const int averaging, const float offset, const int resolution) { //Need to modify for appropriate averaging output //Remember whitespace is a 1 int idx = threadIdx.x; float* out_ptr = &out[0]; hipComplex* d_fft_ptr = &d_fft[0]; const float threshold = -96; const int filter_level = 13; //normally 16 for fiveby, 13 for aggressive bool THRESHOLD = true; bool FILTER = true; bool FIVEBY = false; for (int j = 0; j < num_wins; j++){ for (int i = blockIdx.x * blockDim.x + idx; i < resolution*averaging; i += blockDim.x * gridDim.x){ out_ptr[((resolution / 2) + i) % resolution] += ( 10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / resolution) //DFT bin magnitude ); } // __syncthreads(); if (THRESHOLD){ out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = ((out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset) <= threshold) ? 1 : 0; } else { out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = (out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset); } // if (out_ptr[blockIdx.x * blockDim.x + idx] <= threshold) out_ptr[blockIdx.x * blockDim.x + idx] = 1; // elseP out_ptr[blockIdx.x * blockDim.x + idx] = 0; out_ptr += resolution; //increment out_ptr by one frame of averages d_fft_ptr += resolution; //increment d_fft_ptr by number of frames averaged } //Now perform filtering, only if thresholding is performed if (THRESHOLD && FILTER && !FIVEBY){ //Zero out pointer out_ptr = &out[0 + resolution]; //we dont want to filter the first row - at this stage anyway int absthreadidx = blockIdx.x * blockDim.x + threadIdx.x; //I wanted to be more explicit before, shortcutting here //j starts at 1 and ends at num_wins-1 to give the sufficient spacing for the 3x3 kernel for (int j = 1; j < num_wins-1; j++){ if (j == 0) { //first row } else if (j == num_wins - 1) { //last row } if (absthreadidx == 0) { //left edge } else if (absthreadidx == resolution - 1) { //right edge } else { //everything else //If the centre of a kernel = 1, take a 3 by 3 kernel, and sum the edge cells, if greater than 7, can assume this is noise if (out_ptr[absthreadidx] == 0) { //Currently set to detect a lone cell. Can increase this for more agressive filtering. 
Though the kernel size may have to increase also if ((out_ptr[absthreadidx - resolution - 1] + out_ptr[absthreadidx - resolution] + out_ptr[absthreadidx - resolution + 1] + out_ptr[absthreadidx - 1] + out_ptr[absthreadidx + 1] + out_ptr[absthreadidx + resolution - 1] + out_ptr[absthreadidx + resolution] + out_ptr[absthreadidx + resolution + 1]) > 6) { out_ptr[absthreadidx] = 1; } } } out_ptr += resolution; //next row of output array (as the 2d output is really just a very long 1d array) } } else if (THRESHOLD && FILTER && FIVEBY){ //Zero out pointer out_ptr = &out[0 + 2 * resolution]; //we dont want to filter the first 2 rows - at this stage anyway int absthreadidx = blockIdx.x * blockDim.x + threadIdx.x; //I wanted to be more explicit before, shortcutting here //j starts at 1 and ends at num_wins-1 to give the sufficient spacing for the 3x3 kernel for (int j = 2; j < num_wins - 2; j++){ if (j == 0 || j == 1) { //first row } else if (j == num_wins - 2 || j == num_wins - 1) { //last row } if (absthreadidx == 0 || absthreadidx == 1) { //left edge } else if (absthreadidx == resolution - 1 || absthreadidx == resolution - 2) { //right edge } else { //everything else //If the centre of a kernel = 1, take a 3 by 3 kernel, and sum the edge cells, if greater than filter_level, can assume this is noise if (out_ptr[absthreadidx] == 0) { //Currently set to detect a lone cell. Can increase this for more agressive filtering. Though the kernel size may have to increase also if (( out_ptr[absthreadidx - 2 * resolution - 2] + out_ptr[absthreadidx - 2 * resolution - 1] + out_ptr[absthreadidx - 2 * resolution] + out_ptr[absthreadidx - 2 * resolution + 1] + out_ptr[absthreadidx - 2 * resolution + 2] + out_ptr[absthreadidx - resolution - 2] + out_ptr[absthreadidx - resolution - 1] + out_ptr[absthreadidx - resolution] + out_ptr[absthreadidx - resolution + 1] + out_ptr[absthreadidx - resolution + 2] + out_ptr[absthreadidx - 2] + out_ptr[absthreadidx - 1] + out_ptr[absthreadidx + 1] + out_ptr[absthreadidx + 2] + out_ptr[absthreadidx + resolution - 2] + out_ptr[absthreadidx + resolution - 1] + out_ptr[absthreadidx + resolution] + out_ptr[absthreadidx + resolution + 1] + out_ptr[absthreadidx + resolution + 2] + out_ptr[absthreadidx + 2 * resolution - 2] + out_ptr[absthreadidx + 2 * resolution - 1] + out_ptr[absthreadidx + 2 * resolution] + out_ptr[absthreadidx + 2 * resolution + 1] + out_ptr[absthreadidx + 2 * resolution + 2] ) > filter_level) { out_ptr[absthreadidx] = 1; } } } out_ptr += resolution; //next row of output array (as the 2d output is really just a very long 1d array) } } } /* /* BACKUP LOL static __global__ void avg_out(float* out, hipComplex* d_fft, const int num_wins, const int averaging) { int idx = threadIdx.x; float* out_ptr = &out[0]; hipComplex* d_fft_ptr = &d_fft[0]; for (int j = 0; j < num_wins / averaging; j++){ for (int i = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*averaging; i += blockDim.x * gridDim.x){ out_ptr[i%NUM_SAMPS] += ( 10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / NUM_SAMPS) //DFT bin magnitude ); } out_ptr += NUM_SAMPS; //increment out_ptr by one frame of averages d_fft_ptr += NUM_SAMPS*averaging; //increment d_fft_ptr by number of frames averaged } }*/ void cuda_memcheck() { size_t free_byte; size_t total_byte; hipError_t cudaStatus; cudaStatus = hipMemGetInfo(&free_byte, &total_byte); size_t used_byte = total_byte - free_byte; if (cudaStatus != hipSuccess){ printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cudaStatus)); 
exit(1); } else printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_byte / 1024.0 / 1024.0, free_byte / 1024.0 / 1024.0, total_byte / 1024.0 / 1024.0); }
d8c495c28473d8d8aa2ad6ce5404452c32fbedff.cu
#include "fft_module.cuh" #include "cuda_module.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_functions.h" #include "helper_cuda.h" #include <stdio.h> #include <iostream> #include <cufft.h> #include <complex> //int main(int argc, char **argv){} //The cufft must be invoked by the host, not as part of a kernel. //num_wins = number of requested frames + averaging -1, this is required to ensure the correct averaging parameters. //a requested frame is an FFT of size = resolution //by the same logic, h_samp_array must be = num_wins * resolution. h_out will be = (num_wins - (averaging-1)) * resolution void perform_fft(std::complex<short>* h_samp_arry, float* h_out, const int resolution, const int averaging, const int num_wins) { /* if (num_wins == 0) { std::cout << "AMG NO WINS!\n"; num_wins = sizeof(h_samp_arry) / (2 * resolution); std::cout << "Number of windows: " << num_wins << std::endl; } */ //const int num_wins = 1; //cuComplex* samp[resolution]; //std::complex<short>* d_samp; cudaError_t cudaStatus; cufftResult fftStatus; //Create cufft plan, turns out cufft handles its own memory transfers, so we must use callbacks in order to avoid numerous reads and writes in the device //Will however use multiple kernels initially, then see what the performance improvement is with callbacks at a later stage. n.n cufftHandle plan; fftStatus = cufftPlan1d(&plan, resolution, CUFFT_C2C, (num_wins + averaging - 1)); //is deprecated //int n[1] = { resolution }; //fftStatus = cufftPlanMany(&plan, 1, n, // NULL, 1, resolution, /// NULL, 1, resolution, // CUFFT_C2C, (num_wins + averaging - 1)); if (fftStatus != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error creating plan: %d\n", fftStatus); goto Error; } // for outputting of averaged and processed samples / float* d_out; //cast stl complex to cuda complex cuComplexShort* h_samp_ptr = (cuComplexShort*)&h_samp_arry[0]; //std::cout << h_samp_arry[0].real() << "," << h_samp_arry[0].imag() << " cuCmplx" << h_samp_ptr[0].x << "," << h_samp_ptr[0].y << std::endl; float* h_coef; h_coef = (float*)malloc(sizeof(float)*resolution); float* d_coef; cuComplexShort* d_samp; cuComplex* d_fftbuff; float win_power = 0; int rx_gain = 30; //Create coefficient array and x axis index for plotting for (int i = 0; i < resolution; i++) { h_coef[i] = 0.35875 - 0.48829*cos(2 * pi*i / (resolution - 1)) + 0.14128*cos(4 * pi*i / (resolution - 1)) - 0.01168*cos(6 * pi*i / (resolution - 1)); //blackmann harris window win_power += (h_coef[i] * h_coef[i]); //this computes the total window power and normalises it to account for DC gain due to the window. } win_power /= resolution; //normalise the total window power across each sample. const float offset = 10 - rx_gain + 10 * std::log10(win_power); //10 is the MAX power detected by the ADC and take into account the gain of the frontend. //printf("GPU Offset: %f", offset); cuda_memcheck(); //allocate the memory for the GPU cudaStatus = cudaMalloc((cuComplexShort**)&d_samp, sizeof(cuComplexShort)* resolution*(num_wins + averaging - 1)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "d_samp cudaMalloc failed! %s", cudaGetErrorString(cudaStatus)); goto Error; } cuda_memcheck(); cudaStatus = cudaMalloc((float**)&d_coef, sizeof(float)*resolution); if (cudaStatus != cudaSuccess) { fprintf(stderr, "d_coef cudaMalloc failed! 
%s", cudaGetErrorString(cudaStatus)); goto Error; } cuda_memcheck(); cudaStatus = cudaMalloc((cuComplex**)&d_fftbuff, sizeof(cuComplex)*resolution*(num_wins + averaging - 1)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "d_fftbuff cudaMalloc failed! %s", cudaGetErrorString(cudaStatus)); goto Error; } cuda_memcheck(); //Transfer data to GPU cudaStatus = cudaMemcpy(d_coef, h_coef, sizeof(float)*resolution, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy to Device failed! %s", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaMemcpy(d_samp, h_samp_ptr, sizeof(cuComplexShort)*resolution*(num_wins + averaging - 1), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy to Device failed! %s", cudaGetErrorString(cudaStatus)); goto Error; } cufft_prep <<< (resolution*num_wins) / CU_THD, CU_THD >> > (d_fftbuff, d_samp, d_coef, (num_wins + averaging - 1), resolution); //This will create (WIN_SAMPS*num_wins)/CU_THD blocks, with 1024 threads per block checkCudaErrors(cudaFree(d_samp)); checkCudaErrors(cudaFree(d_coef)); //inplace fft fftStatus = cufftExecC2C(plan, d_fftbuff, d_fftbuff, CUFFT_FORWARD); if (fftStatus != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: ExecC2C Forward failed %d\n", fftStatus); goto Error; } cudaStatus = cudaMalloc((float**)&d_out, sizeof(float)*resolution * num_wins); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! %s", cudaGetErrorString(cudaStatus)); goto Error; } cudaMemset(d_out, 0, sizeof(float)*resolution * num_wins); //initialise to zero //Do something with the fft'd samples, like average them, then output them to the host, where the host can perform detection. avg_out <<< resolution / CU_THD, CU_THD >>> (d_out, d_fftbuff, num_wins, averaging, offset, resolution); filter <<< resolution / CU_THD, CU_THD >>> (d_out, num_wins, resolution); //As this uses the correct moving average, num_wins does not have to be divided out cudaStatus = cudaMemcpy(h_out, d_out, sizeof(float)*resolution * num_wins, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy to Host failed! %s", cudaGetErrorString(cudaStatus)); goto Error; } Error: cufftDestroy(plan); checkCudaErrors(cudaFree(d_out)); //checkCudaErrors(cudaFree(d_samp)); //checkCudaErrors(cudaFree(d_coef)); checkCudaErrors(cudaFree(d_fftbuff)); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed! 
%s", cudaGetErrorString(cudaStatus)); } //return h_out; } //Kernel Call //https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-use-cufft-callbacks-custom-data-processing/ for inspiration static __global__ void cufft_prep(cuComplex* d_fft, cuComplexShort* d_s, float* d_w, const int num_wins, const int resolution) { int idx = threadIdx.x; //blockDim = number of threads in a block //This will take an array of complex shorts (14b samples) an array of cuComplex and a window array, will convert the com_short to cuComplex (com_float), correctly scale the samples and apply the appropriate window prepping it for fft for (int i = blockIdx.x * blockDim.x + idx; i < resolution*num_wins; i += blockDim.x * gridDim.x){ d_fft[i].x = (d_s[i].x*1.0f / 32767.0f) * d_w[i%resolution]; d_fft[i].y = (d_s[i].y*1.0f / 32767.0f) * d_w[i%resolution]; } //if(idx == 0) printf("d_s[%d]: %f,%f fftbuff %f,%f\n", idx, d_s[idx].x, d_s[idx].y, d_s[idx].x, d_s[idx].x); } static __global__ void filter(float*out, const int num_wins, const int resolution){ int idx = threadIdx.x; int stride = blockDim.x * gridDim.x; float* out_ptr = &out[0]; const int fiveby_filter_level = 13; //normally 16 for 5x5, 13 for aggressive. const int filter_level = 5; // 3x3 kernel bool FIVEBY = true; //for use later //increment loop by 1, and decrease total run by 1 to accomodate for edges of the kernel if (!FIVEBY){ for (int i = (blockIdx.x * blockDim.x + idx) + stride; i < resolution*(num_wins - 1); i += stride){ if (out_ptr[i] == 0 && (blockIdx.x + idx != 0 || blockIdx.x + idx != resolution - 1)){ if ((out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride]) > filter_level){ out_ptr[i] = 1; } } } } if (FIVEBY){ //special case code for handling the beginning and end of the image, note that edges are ignored as they are significantly less impactful on window generation for (int i = blockIdx.x * blockDim.x + idx, j = 0; i < stride*(num_wins); i += stride, j++){ if (out_ptr[i] == 0 && (blockIdx.x * blockDim.x + idx != 0 && blockIdx.x * blockDim.x + idx != resolution - 1 && blockIdx.x * blockDim.x + idx != 1 && blockIdx.x * blockDim.x + idx != resolution - 2)){ if (j == 0){ if (( //unrolled here for efficiencies out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride] + out_ptr[i - 2 + 2 * stride] + out_ptr[i - 1 + 2 * stride] + out_ptr[i + 2 * stride] + out_ptr[i + 1 + 2 * stride] + out_ptr[i + 2 + 2 * stride]) > fiveby_filter_level - 6) { out_ptr[i] = 1; } } else if (j == 1){ if (( //unrolled here for efficiencies out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride] + out_ptr[i - 2 + 2 * stride] + out_ptr[i - 1 + 2 * stride] + out_ptr[i + 2 * stride] + out_ptr[i + 1 + 2 * stride] + out_ptr[i + 2 + 2 * stride]) > fiveby_filter_level - 3) { out_ptr[i] = 1; } } else if (j >= 2 && j < num_wins - 2){ if (( //unrolled here for efficiencies out_ptr[i - 2 - 2 * stride] + out_ptr[i - 1 - 2 * stride] + out_ptr[i - 2 * stride] + out_ptr[i + 1 - 2 * stride] + out_ptr[i + 2 - 2 * stride] + out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + 
out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride] + out_ptr[i - 2 + 2 * stride] + out_ptr[i - 1 + 2 * stride] + out_ptr[i + 2 * stride] + out_ptr[i + 1 + 2 * stride] + out_ptr[i + 2 + 2 * stride]) > fiveby_filter_level) { out_ptr[i] = 1; } } else if (j == num_wins - 2){ if (( //unrolled here for efficiencies, this isnt called ... bug out_ptr[i - 2 - 2 * stride] + out_ptr[i - 1 - 2 * stride] + out_ptr[i - 2 * stride] + out_ptr[i + 1 - 2 * stride] + out_ptr[i + 2 - 2 * stride] + out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2] + out_ptr[i - 2 + stride] + out_ptr[i - 1 + stride] + out_ptr[i + stride] + out_ptr[i + 1 + stride] + out_ptr[i + 2 + stride]) > fiveby_filter_level - 3) { out_ptr[i] = 1; } } else if (j == num_wins - 1){ if (( //unrolled here for efficiencies, neither is this ... BUG!!!!! out_ptr[i - 2 - 2 * stride] + out_ptr[i - 1 - 2 * stride] + out_ptr[i - 2 * stride] + out_ptr[i + 1 - 2 * stride] + out_ptr[i + 2 - 2 * stride] + out_ptr[i - 2 - stride] + out_ptr[i - 1 - stride] + out_ptr[i - stride] + out_ptr[i + 1 - stride] + out_ptr[i + 2 - stride] + out_ptr[i - 2] + out_ptr[i - 1] + out_ptr[i + 1] + out_ptr[i + 2]) > fiveby_filter_level - 6) { out_ptr[i] = 1; } } } } } } static __global__ void avg_out(float* out, cuComplex* d_fft, const int num_wins, const int averaging, const float offset, const int resolution) { //Need to modify for appropriate averaging output int idx = threadIdx.x; float* out_ptr = &out[0]; cuComplex* d_fft_ptr = &d_fft[0]; const float threshold = -96; bool THRESHOLD = true; for (int j = 0; j < num_wins; j++){ //what about the final set of frames? They should be retained and re-computed to maintain accurate averaging ... for (int i = blockIdx.x * blockDim.x + idx; i < resolution*averaging; i += blockDim.x * gridDim.x){ //Moving average of each output bin according to the 'averaging' value - typically set to 10 out_ptr[((resolution / 2) + i) % resolution] += ( 10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / resolution) //DFT bin magnitude ); } // __syncthreads(); if (THRESHOLD){ out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = ((out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset) <= threshold) ? 
1 : 0; } else { out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = (out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset); } // if (out_ptr[blockIdx.x * blockDim.x + idx] <= threshold) out_ptr[blockIdx.x * blockDim.x + idx] = 1; // elseP out_ptr[blockIdx.x * blockDim.x + idx] = 0; out_ptr += resolution; //increment out_ptr by one frame of averages d_fft_ptr += resolution; //increment d_fft_ptr by one frame to maintain rolling average } } /* DEPRECATED static __global__ void avg_out_filter(float* out, cuComplex* d_fft, const int num_wins, const int averaging, const float offset, const int resolution) { //Need to modify for appropriate averaging output //Remember whitespace is a 1 int idx = threadIdx.x; float* out_ptr = &out[0]; cuComplex* d_fft_ptr = &d_fft[0]; const float threshold = -96; const int filter_level = 13; //normally 16 for fiveby, 13 for aggressive bool THRESHOLD = true; bool FILTER = true; bool FIVEBY = false; for (int j = 0; j < num_wins; j++){ for (int i = blockIdx.x * blockDim.x + idx; i < resolution*averaging; i += blockDim.x * gridDim.x){ out_ptr[((resolution / 2) + i) % resolution] += ( 10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / resolution) //DFT bin magnitude ); } // __syncthreads(); if (THRESHOLD){ out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = ((out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset) <= threshold) ? 1 : 0; } else { out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] = (out_ptr[(resolution / 2 + blockIdx.x * blockDim.x + idx) % resolution] / averaging + offset); } // if (out_ptr[blockIdx.x * blockDim.x + idx] <= threshold) out_ptr[blockIdx.x * blockDim.x + idx] = 1; // elseP out_ptr[blockIdx.x * blockDim.x + idx] = 0; out_ptr += resolution; //increment out_ptr by one frame of averages d_fft_ptr += resolution; //increment d_fft_ptr by number of frames averaged } //Now perform filtering, only if thresholding is performed if (THRESHOLD && FILTER && !FIVEBY){ //Zero out pointer out_ptr = &out[0 + resolution]; //we dont want to filter the first row - at this stage anyway int absthreadidx = blockIdx.x * blockDim.x + threadIdx.x; //I wanted to be more explicit before, shortcutting here //j starts at 1 and ends at num_wins-1 to give the sufficient spacing for the 3x3 kernel for (int j = 1; j < num_wins-1; j++){ if (j == 0) { //first row } else if (j == num_wins - 1) { //last row } if (absthreadidx == 0) { //left edge } else if (absthreadidx == resolution - 1) { //right edge } else { //everything else //If the centre of a kernel = 1, take a 3 by 3 kernel, and sum the edge cells, if greater than 7, can assume this is noise if (out_ptr[absthreadidx] == 0) { //Currently set to detect a lone cell. Can increase this for more agressive filtering. 
Though the kernel size may have to increase also if ((out_ptr[absthreadidx - resolution - 1] + out_ptr[absthreadidx - resolution] + out_ptr[absthreadidx - resolution + 1] + out_ptr[absthreadidx - 1] + out_ptr[absthreadidx + 1] + out_ptr[absthreadidx + resolution - 1] + out_ptr[absthreadidx + resolution] + out_ptr[absthreadidx + resolution + 1]) > 6) { out_ptr[absthreadidx] = 1; } } } out_ptr += resolution; //next row of output array (as the 2d output is really just a very long 1d array) } } else if (THRESHOLD && FILTER && FIVEBY){ //Zero out pointer out_ptr = &out[0 + 2 * resolution]; //we dont want to filter the first 2 rows - at this stage anyway int absthreadidx = blockIdx.x * blockDim.x + threadIdx.x; //I wanted to be more explicit before, shortcutting here //j starts at 1 and ends at num_wins-1 to give the sufficient spacing for the 3x3 kernel for (int j = 2; j < num_wins - 2; j++){ if (j == 0 || j == 1) { //first row } else if (j == num_wins - 2 || j == num_wins - 1) { //last row } if (absthreadidx == 0 || absthreadidx == 1) { //left edge } else if (absthreadidx == resolution - 1 || absthreadidx == resolution - 2) { //right edge } else { //everything else //If the centre of a kernel = 1, take a 3 by 3 kernel, and sum the edge cells, if greater than filter_level, can assume this is noise if (out_ptr[absthreadidx] == 0) { //Currently set to detect a lone cell. Can increase this for more agressive filtering. Though the kernel size may have to increase also if (( out_ptr[absthreadidx - 2 * resolution - 2] + out_ptr[absthreadidx - 2 * resolution - 1] + out_ptr[absthreadidx - 2 * resolution] + out_ptr[absthreadidx - 2 * resolution + 1] + out_ptr[absthreadidx - 2 * resolution + 2] + out_ptr[absthreadidx - resolution - 2] + out_ptr[absthreadidx - resolution - 1] + out_ptr[absthreadidx - resolution] + out_ptr[absthreadidx - resolution + 1] + out_ptr[absthreadidx - resolution + 2] + out_ptr[absthreadidx - 2] + out_ptr[absthreadidx - 1] + out_ptr[absthreadidx + 1] + out_ptr[absthreadidx + 2] + out_ptr[absthreadidx + resolution - 2] + out_ptr[absthreadidx + resolution - 1] + out_ptr[absthreadidx + resolution] + out_ptr[absthreadidx + resolution + 1] + out_ptr[absthreadidx + resolution + 2] + out_ptr[absthreadidx + 2 * resolution - 2] + out_ptr[absthreadidx + 2 * resolution - 1] + out_ptr[absthreadidx + 2 * resolution] + out_ptr[absthreadidx + 2 * resolution + 1] + out_ptr[absthreadidx + 2 * resolution + 2] ) > filter_level) { out_ptr[absthreadidx] = 1; } } } out_ptr += resolution; //next row of output array (as the 2d output is really just a very long 1d array) } } } /* /* BACKUP LOL static __global__ void avg_out(float* out, cuComplex* d_fft, const int num_wins, const int averaging) { int idx = threadIdx.x; float* out_ptr = &out[0]; cuComplex* d_fft_ptr = &d_fft[0]; for (int j = 0; j < num_wins / averaging; j++){ for (int i = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*averaging; i += blockDim.x * gridDim.x){ out_ptr[i%NUM_SAMPS] += ( 10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / NUM_SAMPS) //DFT bin magnitude ); } out_ptr += NUM_SAMPS; //increment out_ptr by one frame of averages d_fft_ptr += NUM_SAMPS*averaging; //increment d_fft_ptr by number of frames averaged } }*/ void cuda_memcheck() { size_t free_byte; size_t total_byte; cudaError_t cudaStatus; cudaStatus = cudaMemGetInfo(&free_byte, &total_byte); size_t used_byte = total_byte - free_byte; if (cudaStatus != cudaSuccess){ printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cudaStatus)); 
exit(1); } else printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n", used_byte / 1024.0 / 1024.0, free_byte / 1024.0 / 1024.0, total_byte / 1024.0 / 1024.0); }
b3e4e79d8e6dfbf384d923d2d477bbd9823b6496.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <general_buffer2.hpp> #include <optimizers/adagrad_optimizer.hpp> #include <utils.cuh> #include <utils.hpp> namespace HugeCTR { namespace { template <typename T> __global__ void ada_grad_update_kernel(int len, float *weight, const T* wgrad, T *sum, float lr, const float epsilon, float scaler){ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { float gi = TypeConvertFunc<float, T>::convert(wgrad[i]) / scaler; float accum_ = TypeConvertFunc<float, T>::convert(__ldg(&sum[i])); accum_ += gi * gi; float std_ = epsilon + sqrtf(accum_); weight[i] -= lr * gi / std_; sum[i] = TypeConvertFunc<T, float>::convert(accum_); } } } template <typename T> AdaGradOptimizer<T>::AdaGradOptimizer(const Tensor2<float>& weight_main, const Tensor2<T>& wgrad, const std::shared_ptr<BufferBlock2<T>>& opt_buf, const std::shared_ptr<GPUResource>& gpu_resource, float learning_rate, float initial_accu_value, float epsilon, float scaler) : Optimizer(weight_main, gpu_resource, learning_rate, scaler), wgrad_(wgrad), initial_accumulator_value_(initial_accu_value), epsilon_(epsilon) { if(weight_main_.get_num_elements() != wgrad_.get_num_elements()) { CK_THROW_(Error_t::WrongInput, "weight->get_num_elements() != wgrad->get_num_elements()"); } opt_buf->reserve({weight_main.get_num_elements()}, &accum_); } template <typename T> void AdaGradOptimizer<T>::initialize() { CK_CUDA_THROW_(hipMemsetAsync(accum_.get_ptr(), initial_accumulator_value_, accum_.get_size_in_bytes(), gpu_resource_->get_stream())); } template <typename T> void AdaGradOptimizer<T>::update() { CudaDeviceContext context(get_device_id()); const size_t len = weight_main_.get_num_elements(); constexpr size_t block_dim = 256; const size_t grid_dim = (len - 1) / block_dim + 1; float* weight = weight_main_.get_ptr(); const T* wgrad = wgrad_.get_ptr(); hipLaunchKernelGGL(( ada_grad_update_kernel), dim3(grid_dim), dim3(block_dim), 0, gpu_resource_->get_stream(), len, weight, wgrad, accum_.get_ptr(), lr_, epsilon_, scaler_); #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template class AdaGradOptimizer<float>; template class AdaGradOptimizer<__half>; }
b3e4e79d8e6dfbf384d923d2d477bbd9823b6496.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <general_buffer2.hpp> #include <optimizers/adagrad_optimizer.hpp> #include <utils.cuh> #include <utils.hpp> namespace HugeCTR { namespace { template <typename T> __global__ void ada_grad_update_kernel(int len, float *weight, const T* wgrad, T *sum, float lr, const float epsilon, float scaler){ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { float gi = TypeConvertFunc<float, T>::convert(wgrad[i]) / scaler; float accum_ = TypeConvertFunc<float, T>::convert(__ldg(&sum[i])); accum_ += gi * gi; float std_ = epsilon + sqrtf(accum_); weight[i] -= lr * gi / std_; sum[i] = TypeConvertFunc<T, float>::convert(accum_); } } } template <typename T> AdaGradOptimizer<T>::AdaGradOptimizer(const Tensor2<float>& weight_main, const Tensor2<T>& wgrad, const std::shared_ptr<BufferBlock2<T>>& opt_buf, const std::shared_ptr<GPUResource>& gpu_resource, float learning_rate, float initial_accu_value, float epsilon, float scaler) : Optimizer(weight_main, gpu_resource, learning_rate, scaler), wgrad_(wgrad), initial_accumulator_value_(initial_accu_value), epsilon_(epsilon) { if(weight_main_.get_num_elements() != wgrad_.get_num_elements()) { CK_THROW_(Error_t::WrongInput, "weight->get_num_elements() != wgrad->get_num_elements()"); } opt_buf->reserve({weight_main.get_num_elements()}, &accum_); } template <typename T> void AdaGradOptimizer<T>::initialize() { CK_CUDA_THROW_(cudaMemsetAsync(accum_.get_ptr(), initial_accumulator_value_, accum_.get_size_in_bytes(), gpu_resource_->get_stream())); } template <typename T> void AdaGradOptimizer<T>::update() { CudaDeviceContext context(get_device_id()); const size_t len = weight_main_.get_num_elements(); constexpr size_t block_dim = 256; const size_t grid_dim = (len - 1) / block_dim + 1; float* weight = weight_main_.get_ptr(); const T* wgrad = wgrad_.get_ptr(); ada_grad_update_kernel<<<grid_dim, block_dim, 0, gpu_resource_->get_stream()>>>( len, weight, wgrad, accum_.get_ptr(), lr_, epsilon_, scaler_); #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template class AdaGradOptimizer<float>; template class AdaGradOptimizer<__half>; }
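Element-wise, the kernel above performs the per-parameter AdaGrad step, with the gradient first de-scaled by `scaler` to undo loss scaling. A scalar reference of that update, mirroring the kernel's arithmetic in plain C:

#include <math.h>

// One AdaGrad step for a single parameter, matching the kernel's per-element math:
// accum += g^2;  w -= lr * g / (epsilon + sqrt(accum))
static void adagrad_step(float* weight, float* accum, float grad,
                         float lr, float epsilon, float scaler) {
  float g = grad / scaler;        // undo loss scaling
  *accum += g * g;                // accumulate squared gradients
  *weight -= lr * g / (epsilon + sqrtf(*accum));
}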
21bd8e984e6e167d0d8418bc3780ef17a119d163.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "odd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *darr = NULL; hipMalloc(&darr, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( odd), dim3(gridBlock),dim3(threadBlock), 0, 0, darr,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( odd), dim3(gridBlock),dim3(threadBlock), 0, 0, darr,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( odd), dim3(gridBlock),dim3(threadBlock), 0, 0, darr,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
21bd8e984e6e167d0d8418bc3780ef17a119d163.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "odd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *darr = NULL; cudaMalloc(&darr, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); odd<<<gridBlock,threadBlock>>>(darr,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { odd<<<gridBlock,threadBlock>>>(darr,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { odd<<<gridBlock,threadBlock>>>(darr,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
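The generated harness above times 1000 asynchronous launches with steady_clock and only synchronizes before the loop, so the reported microseconds mostly reflect launch overhead plus whatever device work happens to complete in flight. An event-based variant that times device execution of the same loop could look like the sketch below; since odd.cu is not included in this dump, a stub kernel stands in for the real one:

#include <cuda_runtime.h>

__global__ void odd_stub(int* arr, int n) {            // stand-in for odd.cu's kernel
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && (arr[i] & 1)) arr[i] = 0;
}

float time_1000_launches(int* d_arr, int n, dim3 grid, dim3 block) {
  cudaEvent_t t0, t1;
  cudaEventCreate(&t0);
  cudaEventCreate(&t1);
  cudaEventRecord(t0);
  for (int i = 0; i < 1000; ++i) odd_stub<<<grid, block>>>(d_arr, n);
  cudaEventRecord(t1);
  cudaEventSynchronize(t1);          // wait for the whole batch to finish on the device
  float ms = 0.f;
  cudaEventElapsedTime(&ms, t0, t1);
  return ms;
}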
a3df495c2a7fc5e47e012eb815882cf969ab3182.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/flash_attn/flash_fwd_launch_template.h> namespace pytorch_flash{ template<> void run_mha_fwd_<cutlass::bfloat16_t, 192>(Flash_fwd_params &params, hipStream_t stream) { run_mha_fwd_hdim192<cutlass::bfloat16_t>(params, stream); } } // namespace pytorch_flash
a3df495c2a7fc5e47e012eb815882cf969ab3182.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/flash_attn/flash_fwd_launch_template.h> namespace pytorch_flash{ template<> void run_mha_fwd_<cutlass::bfloat16_t, 192>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim192<cutlass::bfloat16_t>(params, stream); } } // namespace pytorch_flash
99075aa3104b40c2e753170ba07de3caeb502eed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { for (int i=0;i<b;++i) { for (int j=0;j<m;++j) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } } xyz1+=n*3; xyz2+=m*3; idx+=m*nsample; } }
99075aa3104b40c2e753170ba07de3caeb502eed.cu
#include "includes.h" __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { for (int i=0;i<b;++i) { for (int j=0;j<m;++j) { int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball float x2=xyz2[j*3+0]; float y2=xyz2[j*3+1]; float z2=xyz2[j*3+2]; float x1=xyz1[k*3+0]; float y1=xyz1[k*3+1]; float z1=xyz1[k*3+2]; float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) idx[j*nsample+l] = k; } idx[j*nsample+cnt] = k; cnt+=1; } } } xyz1+=n*3; xyz2+=m*3; idx+=m*nsample; } }
77bcc384033c8f5f98d53e435f41dededd4942b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) ICG. All rights reserved. * * Institute for Computer Graphics and Vision * Graz University of Technology / Austria * * * This software is distributed WITHOUT ANY WARRANTY; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the above copyright notices for more information. * * * Project : ImageUtilities * Module : Math * Class : none * Language : CUDA * Description : Implementation of Cuda wrappers for arithmetic functions * * Author : Manuel Werlberger * EMail : [email protected] * */ #ifndef IUMATH_ARITHMETIC_CU #define IUMATH_ARITHMETIC_CU #include <iucore/iutextures.cuh> #include <iucutil.h> #include "arithmetic.cuh" namespace iuprivate { /* **************************************************************************** * weighted add * ****************************************************************************/ // kernel: weighted add; 32-bit; __global__ void cuAddWeightedKernel_32f_C1( const float weight1, const float weight2, float* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = weight1*tex2D(tex1_32f_C1__, xx, yy) + weight2*tex2D(tex2_32f_C1__, xx, yy); } } // wrapper: weighted add; 32-bit; void cuAddWeighted(const iu::ImageGpu_32f_C1* src1, const float& weight1, const iu::ImageGpu_32f_C1* src2, const float& weight2, iu::ImageGpu_32f_C1* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc, src1->width(), src1->height(), src1->pitch()); hipBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc, src2->width(), src2->height(), src2->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuAddWeightedKernel_32f_C1) , dim3(dimGrid), dim3(dimBlock) , 0, 0, weight1, weight2, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_32f_C1__); hipUnbindTexture(&tex2_32f_C1__); // error check IU_CUDA_CHECK(); } /****************************************************************************** multiplication with factor *******************************************************************************/ // kernel: multiplication with factor; 8-bit; 1-channel __global__ void cuMulCKernel(const unsigned char factor, unsigned char* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { unsigned char val = tex2D(tex1_8u_C1__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 8-bit; 1-channel void cuMulC(const iu::ImageGpu_8u_C1* src, const unsigned char& factor, iu::ImageGpu_8u_C1* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar1>(); hipBindTexture2D(0, &tex1_8u_C1__, src->data(), 
&channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_8u_C1__); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 8-bit; 4-channel __global__ void cuMulCKernel(const uchar4 factor, uchar4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { uchar4 val = tex2D(tex1_8u_C4__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 8-bit; 4-channel void cuMulC(const iu::ImageGpu_8u_C4* src, const uchar4& factor, iu::ImageGpu_8u_C4* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar4>(); hipBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_8u_C4__); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 32-bit; 1-channel __global__ void cuMulCKernel(const float factor, float* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { float val = tex2D(tex1_32f_C1__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 32-bit; 1-channel void cuMulC(const iu::ImageGpu_32f_C1* src, const float& factor, iu::ImageGpu_32f_C1* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_32f_C1__); // error check IU_CUDA_CHECK(); } // kernel: volume multiplication with factor; 32-bit; 1-channel __global__ void cuVolMulCKernel(const float factor, float* dst, const float*src, const size_t stride, const size_t slice_stride, const int width, const int height, const int depth) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; if(x<width && y<height) { for (int z=0; z<depth; z++) { int vc = oc + z*slice_stride; dst[vc] = src[vc] * 
factor; } } } // wrapper: volume multiplication with factor; 32-bit; 1-channel void cuMulC(const iu::VolumeGpu_32f_C1* src, const float& factor, iu::VolumeGpu_32f_C1* dst) { // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuVolMulCKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, factor, dst->data(), src->data(), dst->stride(), dst->slice_stride(), dst->width(), dst->height(), dst->depth()); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 32-bit; 2-channel __global__ void cuMulCKernel(const float2 factor, float2* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { float2 val = tex2D(tex1_32f_C2__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 32-bit; 4-channel void cuMulC(const iu::ImageGpu_32f_C2* src, const float2& factor, iu::ImageGpu_32f_C2* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_32f_C2__); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 32-bit; 4-channel __global__ void cuMulCKernel(const float4 factor, float4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { float4 val = tex2D(tex1_32f_C4__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 32-bit; 4-channel void cuMulC(const iu::ImageGpu_32f_C4* src, const float4& factor, iu::ImageGpu_32f_C4* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float4>(); hipBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_32f_C4__); // error check IU_CUDA_CHECK(); } /****************************************************************************** add val *******************************************************************************/ // kernel: add val; 8-bit; 1-channel __global__ void cuAddCKernel(const unsigned char val, unsigned char* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = 
blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_8u_C1__, xx, yy); } } // wrapper: add val; 8-bit; 1-channel void cuAddC(const iu::ImageGpu_8u_C1* src, const unsigned char& val, iu::ImageGpu_8u_C1* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar1>(); hipBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_8u_C1__); // error check IU_CUDA_CHECK(); } // kernel: add val; 8-bit; 4-channel __global__ void cuAddCKernel(const uchar4 val, uchar4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { uchar4 value = tex2D(tex1_8u_C4__, xx, yy); value.x = value.x + val.x; value.y = value.y + val.y; value.z = value.z + val.z; value.w = value.w + val.w; dst[oc] = value; } } // wrapper: add val; 8-bit; 4-channel void cuAddC(const iu::ImageGpu_8u_C4* src, const uchar4& val, iu::ImageGpu_8u_C4* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar4>(); hipBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_8u_C4__); // error check IU_CUDA_CHECK(); } // kernel: add val; 32-bit; 1-channel __global__ void cuAddCKernel(const float val, float* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_32f_C1__, xx, yy); } } // wrapper: add val; 32-bit; 1-channel void cuAddC(const iu::ImageGpu_32f_C1* src, const float& val, iu::ImageGpu_32f_C1* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures 
hipUnbindTexture(&tex1_32f_C1__); // error check IU_CUDA_CHECK(); } // kernel: add val; 32-bit; 2-channel __global__ void cuAddCKernel(const float2 val, float2* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_32f_C2__, xx, yy); } } // wrapper: add val; 32-bit; 4-channel void cuAddC(const iu::ImageGpu_32f_C2* src, const float2& val, iu::ImageGpu_32f_C2* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float2>(); hipBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_32f_C2__); // error check IU_CUDA_CHECK(); } // kernel: add val; 32-bit; 1-channel __global__ void cuAddCKernel(const float4 val, float4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_32f_C4__, xx, yy); } } // wrapper: add val; 32-bit; 4-channel void cuAddC(const iu::ImageGpu_32f_C4* src, const float4& val, iu::ImageGpu_32f_C4* dst, const IuRect& roi) { // bind textures hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float4>(); hipBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures hipUnbindTexture(&tex1_32f_C4__); // error check IU_CUDA_CHECK(); } } // namespace iuprivate #endif // IUMATH_ARITHMETIC_CU
77bcc384033c8f5f98d53e435f41dededd4942b9.cu
/* * Copyright (c) ICG. All rights reserved. * * Institute for Computer Graphics and Vision * Graz University of Technology / Austria * * * This software is distributed WITHOUT ANY WARRANTY; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the above copyright notices for more information. * * * Project : ImageUtilities * Module : Math * Class : none * Language : CUDA * Description : Implementation of Cuda wrappers for arithmetic functions * * Author : Manuel Werlberger * EMail : [email protected] * */ #ifndef IUMATH_ARITHMETIC_CU #define IUMATH_ARITHMETIC_CU #include <iucore/iutextures.cuh> #include <iucutil.h> #include "arithmetic.cuh" namespace iuprivate { /* **************************************************************************** * weighted add * ****************************************************************************/ // kernel: weighted add; 32-bit; __global__ void cuAddWeightedKernel_32f_C1( const float weight1, const float weight2, float* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = weight1*tex2D(tex1_32f_C1__, xx, yy) + weight2*tex2D(tex2_32f_C1__, xx, yy); } } // wrapper: weighted add; 32-bit; void cuAddWeighted(const iu::ImageGpu_32f_C1* src1, const float& weight1, const iu::ImageGpu_32f_C1* src2, const float& weight2, iu::ImageGpu_32f_C1* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc, src1->width(), src1->height(), src1->pitch()); cudaBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc, src2->width(), src2->height(), src2->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuAddWeightedKernel_32f_C1 <<< dimGrid, dimBlock >>> ( weight1, weight2, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C1__); cudaUnbindTexture(&tex2_32f_C1__); // error check IU_CUDA_CHECK(); } /****************************************************************************** multiplication with factor *******************************************************************************/ // kernel: multiplication with factor; 8-bit; 1-channel __global__ void cuMulCKernel(const unsigned char factor, unsigned char* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { unsigned char val = tex2D(tex1_8u_C1__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 8-bit; 1-channel void cuMulC(const iu::ImageGpu_8u_C1* src, const unsigned char& factor, iu::ImageGpu_8u_C1* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar1>(); cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 
dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuMulCKernel <<< dimGrid, dimBlock >>> ( factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_8u_C1__); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 8-bit; 4-channel __global__ void cuMulCKernel(const uchar4 factor, uchar4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { uchar4 val = tex2D(tex1_8u_C4__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 8-bit; 4-channel void cuMulC(const iu::ImageGpu_8u_C4* src, const uchar4& factor, iu::ImageGpu_8u_C4* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar4>(); cudaBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuMulCKernel <<< dimGrid, dimBlock >>> ( factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_8u_C4__); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 32-bit; 1-channel __global__ void cuMulCKernel(const float factor, float* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { float val = tex2D(tex1_32f_C1__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 32-bit; 1-channel void cuMulC(const iu::ImageGpu_32f_C1* src, const float& factor, iu::ImageGpu_32f_C1* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuMulCKernel <<< dimGrid, dimBlock >>> ( factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C1__); // error check IU_CUDA_CHECK(); } // kernel: volume multiplication with factor; 32-bit; 1-channel __global__ void cuVolMulCKernel(const float factor, float* dst, const float*src, const size_t stride, const size_t slice_stride, const int width, const int height, const int depth) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; if(x<width && y<height) { for (int z=0; z<depth; z++) { int vc = oc + z*slice_stride; dst[vc] = src[vc] * factor; } } } // wrapper: volume multiplication with factor; 32-bit; 1-channel void cuMulC(const iu::VolumeGpu_32f_C1* src, const float& factor, iu::VolumeGpu_32f_C1* dst) { // fragmentation unsigned int 
block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuVolMulCKernel<<<dimGrid, dimBlock>>>(factor, dst->data(), src->data(), dst->stride(), dst->slice_stride(), dst->width(), dst->height(), dst->depth()); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 32-bit; 2-channel __global__ void cuMulCKernel(const float2 factor, float2* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { float2 val = tex2D(tex1_32f_C2__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 32-bit; 4-channel void cuMulC(const iu::ImageGpu_32f_C2* src, const float2& factor, iu::ImageGpu_32f_C2* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuMulCKernel <<< dimGrid, dimBlock >>> ( factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C2__); // error check IU_CUDA_CHECK(); } // kernel: multiplication with factor; 32-bit; 4-channel __global__ void cuMulCKernel(const float4 factor, float4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { float4 val = tex2D(tex1_32f_C4__, xx, yy); dst[oc] = val * factor; } } // wrapper: multiplication with factor; 32-bit; 4-channel void cuMulC(const iu::ImageGpu_32f_C4* src, const float4& factor, iu::ImageGpu_32f_C4* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float4>(); cudaBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuMulCKernel <<< dimGrid, dimBlock >>> ( factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C4__); // error check IU_CUDA_CHECK(); } /****************************************************************************** add val *******************************************************************************/ // kernel: add val; 8-bit; 1-channel __global__ void cuAddCKernel(const unsigned char val, unsigned char* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_8u_C1__, xx, yy); } } // wrapper: add val; 8-bit; 
1-channel void cuAddC(const iu::ImageGpu_8u_C1* src, const unsigned char& val, iu::ImageGpu_8u_C1* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar1>(); cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuAddCKernel <<< dimGrid, dimBlock >>> ( val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_8u_C1__); // error check IU_CUDA_CHECK(); } // kernel: add val; 8-bit; 4-channel __global__ void cuAddCKernel(const uchar4 val, uchar4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { uchar4 value = tex2D(tex1_8u_C4__, xx, yy); value.x = value.x + val.x; value.y = value.y + val.y; value.z = value.z + val.z; value.w = value.w + val.w; dst[oc] = value; } } // wrapper: add val; 8-bit; 4-channel void cuAddC(const iu::ImageGpu_8u_C4* src, const uchar4& val, iu::ImageGpu_8u_C4* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar4>(); cudaBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuAddCKernel <<< dimGrid, dimBlock >>> ( val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_8u_C4__); // error check IU_CUDA_CHECK(); } // kernel: add val; 32-bit; 1-channel __global__ void cuAddCKernel(const float val, float* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_32f_C1__, xx, yy); } } // wrapper: add val; 32-bit; 1-channel void cuAddC(const iu::ImageGpu_32f_C1* src, const float& val, iu::ImageGpu_32f_C1* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuAddCKernel <<< dimGrid, dimBlock >>> ( val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C1__); // error check IU_CUDA_CHECK(); } // kernel: add val; 32-bit; 2-channel __global__ void cuAddCKernel(const float2 val, float2* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y 
+= yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_32f_C2__, xx, yy); } } // wrapper: add val; 32-bit; 4-channel void cuAddC(const iu::ImageGpu_32f_C2* src, const float2& val, iu::ImageGpu_32f_C2* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuAddCKernel <<< dimGrid, dimBlock >>> ( val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C2__); // error check IU_CUDA_CHECK(); } // kernel: add val; 32-bit; 1-channel __global__ void cuAddCKernel(const float4 val, float4* dst, const size_t stride, const int xoff, const int yoff, const int width, const int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; const unsigned int oc = y*stride+x; x += xoff; y += yoff; float xx = x+0.5f; float yy = y+0.5f; if(x>=0 && y>= 0 && x<width && y<height) { dst[oc] = val + tex2D(tex1_32f_C4__, xx, yy); } } // wrapper: add val; 32-bit; 4-channel void cuAddC(const iu::ImageGpu_32f_C4* src, const float4& val, iu::ImageGpu_32f_C4* dst, const IuRect& roi) { // bind textures cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float4>(); cudaBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch()); // fragmentation unsigned int block_size = 16; dim3 dimBlock(block_size, block_size); dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x), iu::divUp(dst->height(), dimBlock.y)); cuAddCKernel <<< dimGrid, dimBlock >>> ( val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height); // unbind textures cudaUnbindTexture(&tex1_32f_C4__); // error check IU_CUDA_CHECK(); } } // namespace iuprivate #endif // IUMATH_ARITHMETIC_CU
1855fba85573f0afe905048f73afae039b5b0abd.hip
// !!! This is a file automatically generated by hipify!!!
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>

namespace testing {

#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_d __attribute__((unused)) = params_.state_vars[0];\
//End of IFACEBLOCK

namespace {

using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;

__global__ void init(arb_mechanism_ppack params_) {
    int n_ = params_.width;
    int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    PPACK_IFACE_BLOCK;
    if (tid_<n_) {
        _pp_var_d[tid_] = 0.;
    }
}

__global__ void multiply(arb_mechanism_ppack params_) {
    PPACK_IFACE_BLOCK;
    auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    auto idx_ = blockIdx.y;
    if(tid_<_pp_var_width) {
        _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
    }
}

__global__ void advance_state(arb_mechanism_ppack params_) {
    int n_ = params_.width;
    int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    PPACK_IFACE_BLOCK;
    if (tid_<n_) {
        auto node_indexi_ = _pp_var_node_index[tid_];
        arb_value_type diam = _pp_var_diam_um[node_indexi_];
        _pp_var_d[tid_] = diam;
    }
}

} // namespace

void mechanism_diam_test_gpu_init_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
    if (!p->multiplicity) return;
    // hipify mangled this launch (dim3(dim3{grid_dim), dim3(1}), ...); the dim3{grid_dim, 1} grid is restored from the CUDA source
    hipLaunchKernelGGL(( multiply), dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p);
}

void mechanism_diam_test_gpu_compute_currents_(arb_mechanism_ppack* p) {}

void mechanism_diam_test_gpu_advance_state_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}

void mechanism_diam_test_gpu_write_ions_(arb_mechanism_ppack* p) {}

void mechanism_diam_test_gpu_post_event_(arb_mechanism_ppack* p) {}

void mechanism_diam_test_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}

} // namespace testing
1855fba85573f0afe905048f73afae039b5b0abd.cu
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>

namespace testing {

#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_d __attribute__((unused)) = params_.state_vars[0];\
//End of IFACEBLOCK

namespace {

using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;

__global__ void init(arb_mechanism_ppack params_) {
    int n_ = params_.width;
    int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    PPACK_IFACE_BLOCK;
    if (tid_<n_) {
        _pp_var_d[tid_] = 0.;
    }
}

__global__ void multiply(arb_mechanism_ppack params_) {
    PPACK_IFACE_BLOCK;
    auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    auto idx_ = blockIdx.y;
    if(tid_<_pp_var_width) {
        _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
    }
}

__global__ void advance_state(arb_mechanism_ppack params_) {
    int n_ = params_.width;
    int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    PPACK_IFACE_BLOCK;
    if (tid_<n_) {
        auto node_indexi_ = _pp_var_node_index[tid_];
        arb_value_type diam = _pp_var_diam_um[node_indexi_];
        _pp_var_d[tid_] = diam;
    }
}

} // namespace

void mechanism_diam_test_gpu_init_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    init<<<grid_dim, block_dim>>>(*p);
    if (!p->multiplicity) return;
    multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p);
}

void mechanism_diam_test_gpu_compute_currents_(arb_mechanism_ppack* p) {}

void mechanism_diam_test_gpu_advance_state_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    advance_state<<<grid_dim, block_dim>>>(*p);
}

void mechanism_diam_test_gpu_write_ions_(arb_mechanism_ppack* p) {}

void mechanism_diam_test_gpu_post_event_(arb_mechanism_ppack* p) {}

void mechanism_diam_test_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}

} // namespace testing
ae1ae3d10815029a2c41999c11695222d5a2f960.hip
// !!! This is a file automatically generated by hipify!!! #include "DifferentialGPU.h" #include <hip/hip_runtime.h> #include <math.h> __device__ __constant__ float d_gridSizeR; __device__ __constant__ float d_gridSizeZ; __device__ __constant__ float d_gridSizePhi; __device__ __constant__ float d_fgkIFCRadius; __global__ void nonBoundaryDifferentialCalculation ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const int symmetry ) { int index, index_x, index_y, index_z; float radius; int mplus, mminus, signplus, signminus; index = (blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; index_x = index / (rows * columns); if (index_x == 0) { index_y = index / rows; } else { index_y = (index % (index_x * rows * columns)) / rows; } index_z = index % columns; // arrayofArrayEr[index] = 0.0; // arrayofArrayEz[index] = 0.0; // arrayofArrayEphi[index] = 0.0; if ((index_x >= 0) && (index_x < phislices) && (index_y > 0) && (index_y < rows - 1) && (index_z > 0) && (index_z < columns - 1)) { mplus = index_x + 1; mminus = index_x - 1; signplus = 1; signminus = 1; // Reflection symmetry in phi (e.g. symmetry at sector boundaries, or half sectors, etc.) if (symmetry == 1) { if (mplus > phislices - 1) { mplus = phislices - 2; } if (mminus < 0) { mminus = 1; } } // Anti-symmetry in phi else if (symmetry == -1) { if (mplus > phislices - 1 ) { mplus = phislices - 2; signplus = -1; } if (mminus < 0) { mminus = 1; signminus = -1; } } // No Symmetries in phi, no boundaries, the calculations is continuous across all phi else { if (mplus > phislices - 1) { mplus = index_x + 1 - phislices; } if (mminus < 0) { mminus = index_x - 1 + phislices; } } radius = d_fgkIFCRadius + index_y * d_gridSizeR; // calculate r direction arrayofArrayEr[index] = -1 * (arrayofArrayV[index_x * rows * columns + (index_y + 1) * columns + index_z] - arrayofArrayV[index_x * rows * columns + (index_y - 1) * columns + index_z]) / (2 * d_gridSizeR); // calculate z direction arrayofArrayEz[index] = -1 * (arrayofArrayV[index_x * rows * columns + index_y * columns + (index_z + 1)] - arrayofArrayV[index_x * rows * columns + index_y * columns + (index_z - 1)]) / (2 * d_gridSizeZ); // calculate phi direction arrayofArrayEphi[index] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + index_y * columns + index_z] - signminus * arrayofArrayV[mminus * rows * columns + index_y * columns + index_z]) / (2 * radius * d_gridSizePhi); /* // DEBUG arrayofArrayEr[index] = index_x; arrayofArrayEz[index] = index_y; arrayofArrayEphi[index] = index_z; */ } } void nonBoundaryDifferentialCalculationGPU ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const int symmetry, const float fgkIFCRadius, const float fgkOFCRadius, const float fgkTPCZ0 ) { // device array float *d_arrayofArrayV; float *d_arrayofArrayEr; float *d_arrayofArrayEz; float *d_arrayofArrayEphi; hipError_t error; // pre-compute constant const float gridSizeR = (fgkOFCRadius - fgkIFCRadius) / (rows - 1); const float gridSizeZ = fgkTPCZ0 / (columns - 1); const float gridSizePhi = M_PI * 2 / phislices; // device memory allocation hipMalloc( &d_arrayofArrayV, rows * columns * phislices * sizeof(float) ); hipMalloc( &d_arrayofArrayEr, rows * columns * phislices * sizeof(float) ); hipMalloc( &d_arrayofArrayEz, 
rows * columns * phislices * sizeof(float) ); hipMalloc( &d_arrayofArrayEphi, rows * columns * phislices * sizeof(float) ); error = hipGetLastError(); if ( error != hipSuccess ) { std::cout << "CUDA memory allocation error: " << hipGetErrorString(error) << '\n'; } // copy data from host to device hipMemcpy( d_arrayofArrayV, arrayofArrayV, rows * columns * phislices * sizeof(float), hipMemcpyHostToDevice ); error = hipGetLastError(); if ( error != hipSuccess ) { std::cout << "CUDA memory copy host to device error: " << hipGetErrorString(error) << '\n'; } // copy constant from host to device hipMemcpyToSymbol( d_gridSizeR, &gridSizeR, 1 * sizeof(float), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol( d_gridSizeZ, &gridSizeZ, 1 * sizeof(float), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol( d_gridSizePhi, &gridSizePhi, 1 * sizeof(float), 0, hipMemcpyHostToDevice ); hipMemcpyToSymbol( d_fgkIFCRadius, &fgkIFCRadius, 1 * sizeof(float), 0, hipMemcpyHostToDevice ); error = hipGetLastError(); if ( error != hipSuccess ) { std::cout << "CUDA memory copy to constant memory host to device error: " << hipGetErrorString(error) << '\n'; } // set grid size and block size dim3 gridSize((rows / 32) + 1, (columns / 32) + 1, phislices); dim3 blockSize(32, 32); // run the kernel hipLaunchKernelGGL(( nonBoundaryDifferentialCalculation), dim3(gridSize), dim3(blockSize) , 0, 0, d_arrayofArrayV, d_arrayofArrayEr, d_arrayofArrayEz, d_arrayofArrayEphi, rows, columns, phislices, symmetry ); error = hipGetLastError(); if ( error != hipSuccess ) { std::cout << "CUDA kernel run error: " << hipGetErrorString(error) << '\n'; } // copy result from device to host hipMemcpy( arrayofArrayEr, d_arrayofArrayEr, rows * columns * phislices * sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( arrayofArrayEz, d_arrayofArrayEz, rows * columns * phislices * sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( arrayofArrayEphi, d_arrayofArrayEphi, rows * columns * phislices * sizeof(float), hipMemcpyDeviceToHost ); error = hipGetLastError(); if ( error != hipSuccess ) { std::cout << "CUDA memory copy device to host error: " << hipGetErrorString(error) << '\n'; } // free device memory hipFree( d_arrayofArrayV ); hipFree( d_arrayofArrayEr ); hipFree( d_arrayofArrayEz ); hipFree( d_arrayofArrayEphi ); error = hipGetLastError(); if ( error != hipSuccess ) { std::cout << "CUDA free allocated memory error: " << hipGetErrorString(error) << '\n'; } } void boundaryDifferentialCalculationCPU ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const int symmetry, const float fgkIFCRadius, const float fgkOFCRadius, const float fgkTPCZ0 ) { const float gridSizeR = (fgkOFCRadius - fgkIFCRadius) / (rows - 1); const float gridSizeZ = fgkTPCZ0 / (columns - 1); const float gridSizePhi = M_PI * 2 / phislices; // TwoPi() / phislices; float radius; int mplus, mminus, signplus, signminus; for (int m = 0; m < phislices; m++) { mplus = m + 1; mminus = m - 1; signplus = 1; signminus = 1; // Reflection symmetry in phi (e.g. symmetry at sector boundaries, or half sectors, etc.) 
if (symmetry == 1) { if (mplus > phislices - 1) { mplus = phislices - 2; } if (mminus < 0) { mminus = 1; } } // Anti-symmetry in phi else if (symmetry == -1) { if (mplus > phislices - 1 ) { mplus = phislices - 2; signplus = -1; } if (mminus < 0) { mminus = 1; signminus = -1; } } // No Symmetries in phi, no boundaries, the calculations is continuous across all phi else { if (mplus > phislices - 1) { mplus = m + 1 - phislices; } if (mminus < 0) { mminus = m - 1 + phislices; } } // calculate boundary r for (int j = 0; j < columns; j++) { // forward difference arrayofArrayEr[m * rows * columns + 0 * columns + j] = -1 * (-0.5 * arrayofArrayV[m * rows * columns + 2 * columns + j] + 2.0 * arrayofArrayV[m * rows * columns + 1 * columns + j] - 1.5 * arrayofArrayV[m * rows * columns + 0 * columns + j]) / gridSizeR; // backward difference arrayofArrayEr[m * rows * columns + (rows - 1) * columns + j] = -1 * (1.5 * arrayofArrayV[m * rows * columns + (rows - 1) * columns + j] - 2.0 * arrayofArrayV[m * rows * columns + (rows - 2) * columns + j] + 0.5 * arrayofArrayV[m * rows * columns + (rows - 3) * columns + j]) / gridSizeR; } for (int i = 0; i < rows; i += rows - 1) { radius = fgkIFCRadius + i * gridSizeR; for (int j = 1; j < columns - 1; j++) { // z direction arrayofArrayEz[m * rows * columns + i * columns + j] = -1 * (arrayofArrayV[m * rows * columns + i * columns + (j + 1)] - arrayofArrayV[m * rows * columns + i * columns + (j - 1)]) / (2 * gridSizeZ); // phi direction arrayofArrayEphi[m * rows * columns + i * columns + j] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + i * columns + j] - signminus * arrayofArrayV[mminus * rows * columns + i * columns + j]) / (2 * radius * gridSizePhi); } } // calculate boundary z for (int i = 0; i < rows; i++) { arrayofArrayEz[m * rows * columns + i * columns + 0] = -1 * (-0.5 * arrayofArrayV[m * rows * columns + i * columns + 2] + 2.0 * arrayofArrayV[m * rows * columns + i * columns + 1] - 1.5 * arrayofArrayV[m * rows * columns + i * columns + 0]) / gridSizeZ; arrayofArrayEz[m * rows * columns + i * columns + (columns - 1)] = -1 * (1.5 * arrayofArrayV[m * rows * columns + i * columns + (columns - 1)] - 2.0 * arrayofArrayV[m * rows * columns + i * columns + (columns - 2)] + 0.5 * arrayofArrayV[m * rows * columns + i * columns + (columns - 3)]) / gridSizeZ; } for (int i = 1; i < rows - 1; i++) { radius = fgkIFCRadius + i * gridSizeR; for (int j = 0; j < columns; j += columns - 1) { // r direction arrayofArrayEr[m * rows * columns + i * columns + j] = -1 * (arrayofArrayV[m * rows * columns + (i + 1) * columns + j] - arrayofArrayV[m * rows * columns + (i - 1) * columns + j]) / (2 * gridSizeR); // phi direction arrayofArrayEphi[m * rows * columns + i * columns + j] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + i * columns + j] - signminus * arrayofArrayV[mminus * rows * columns + i * columns + j]) / (2 * radius * gridSizePhi); } } // calculate corner points for Ephi for ( int i = 0; i < rows; i += rows - 1) { radius = fgkIFCRadius + i * gridSizeR; for (int j = 0; j < columns; j += columns - 1) { // phi didrection arrayofArrayEphi[m * rows * columns + i * columns + j] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + i * columns + j] - signminus * arrayofArrayV[mminus * rows * columns + i * columns + j]) / (2 * radius * gridSizePhi); } } } } extern "C" void DifferentialCalculationGPU ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const 
int symmetry, const float fgkIFCRadius, const float fgkOFCRadius, const float fgkTPCZ0 ) { nonBoundaryDifferentialCalculationGPU(arrayofArrayV, arrayofArrayEr, arrayofArrayEz, arrayofArrayEphi,rows, columns, phislices, symmetry, fgkIFCRadius,fgkOFCRadius, fgkTPCZ0); boundaryDifferentialCalculationCPU(arrayofArrayV, arrayofArrayEr, arrayofArrayEz, arrayofArrayEphi, rows, columns, phislices, symmetry, fgkIFCRadius,fgkOFCRadius, fgkTPCZ0); }
ae1ae3d10815029a2c41999c11695222d5a2f960.cu
#include "DifferentialGPU.h" #include <cuda.h> #include <math.h> __device__ __constant__ float d_gridSizeR; __device__ __constant__ float d_gridSizeZ; __device__ __constant__ float d_gridSizePhi; __device__ __constant__ float d_fgkIFCRadius; __global__ void nonBoundaryDifferentialCalculation ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const int symmetry ) { int index, index_x, index_y, index_z; float radius; int mplus, mminus, signplus, signminus; index = (blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; index_x = index / (rows * columns); if (index_x == 0) { index_y = index / rows; } else { index_y = (index % (index_x * rows * columns)) / rows; } index_z = index % columns; // arrayofArrayEr[index] = 0.0; // arrayofArrayEz[index] = 0.0; // arrayofArrayEphi[index] = 0.0; if ((index_x >= 0) && (index_x < phislices) && (index_y > 0) && (index_y < rows - 1) && (index_z > 0) && (index_z < columns - 1)) { mplus = index_x + 1; mminus = index_x - 1; signplus = 1; signminus = 1; // Reflection symmetry in phi (e.g. symmetry at sector boundaries, or half sectors, etc.) if (symmetry == 1) { if (mplus > phislices - 1) { mplus = phislices - 2; } if (mminus < 0) { mminus = 1; } } // Anti-symmetry in phi else if (symmetry == -1) { if (mplus > phislices - 1 ) { mplus = phislices - 2; signplus = -1; } if (mminus < 0) { mminus = 1; signminus = -1; } } // No Symmetries in phi, no boundaries, the calculations is continuous across all phi else { if (mplus > phislices - 1) { mplus = index_x + 1 - phislices; } if (mminus < 0) { mminus = index_x - 1 + phislices; } } radius = d_fgkIFCRadius + index_y * d_gridSizeR; // calculate r direction arrayofArrayEr[index] = -1 * (arrayofArrayV[index_x * rows * columns + (index_y + 1) * columns + index_z] - arrayofArrayV[index_x * rows * columns + (index_y - 1) * columns + index_z]) / (2 * d_gridSizeR); // calculate z direction arrayofArrayEz[index] = -1 * (arrayofArrayV[index_x * rows * columns + index_y * columns + (index_z + 1)] - arrayofArrayV[index_x * rows * columns + index_y * columns + (index_z - 1)]) / (2 * d_gridSizeZ); // calculate phi direction arrayofArrayEphi[index] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + index_y * columns + index_z] - signminus * arrayofArrayV[mminus * rows * columns + index_y * columns + index_z]) / (2 * radius * d_gridSizePhi); /* // DEBUG arrayofArrayEr[index] = index_x; arrayofArrayEz[index] = index_y; arrayofArrayEphi[index] = index_z; */ } } void nonBoundaryDifferentialCalculationGPU ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const int symmetry, const float fgkIFCRadius, const float fgkOFCRadius, const float fgkTPCZ0 ) { // device array float *d_arrayofArrayV; float *d_arrayofArrayEr; float *d_arrayofArrayEz; float *d_arrayofArrayEphi; cudaError error; // pre-compute constant const float gridSizeR = (fgkOFCRadius - fgkIFCRadius) / (rows - 1); const float gridSizeZ = fgkTPCZ0 / (columns - 1); const float gridSizePhi = M_PI * 2 / phislices; // device memory allocation cudaMalloc( &d_arrayofArrayV, rows * columns * phislices * sizeof(float) ); cudaMalloc( &d_arrayofArrayEr, rows * columns * phislices * sizeof(float) ); cudaMalloc( &d_arrayofArrayEz, rows * columns * phislices * sizeof(float) ); cudaMalloc( 
&d_arrayofArrayEphi, rows * columns * phislices * sizeof(float) ); error = cudaGetLastError(); if ( error != cudaSuccess ) { std::cout << "CUDA memory allocation error: " << cudaGetErrorString(error) << '\n'; } // copy data from host to device cudaMemcpy( d_arrayofArrayV, arrayofArrayV, rows * columns * phislices * sizeof(float), cudaMemcpyHostToDevice ); error = cudaGetLastError(); if ( error != cudaSuccess ) { std::cout << "CUDA memory copy host to device error: " << cudaGetErrorString(error) << '\n'; } // copy constant from host to device cudaMemcpyToSymbol( d_gridSizeR, &gridSizeR, 1 * sizeof(float), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol( d_gridSizeZ, &gridSizeZ, 1 * sizeof(float), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol( d_gridSizePhi, &gridSizePhi, 1 * sizeof(float), 0, cudaMemcpyHostToDevice ); cudaMemcpyToSymbol( d_fgkIFCRadius, &fgkIFCRadius, 1 * sizeof(float), 0, cudaMemcpyHostToDevice ); error = cudaGetLastError(); if ( error != cudaSuccess ) { std::cout << "CUDA memory copy to constant memory host to device error: " << cudaGetErrorString(error) << '\n'; } // set grid size and block size dim3 gridSize((rows / 32) + 1, (columns / 32) + 1, phislices); dim3 blockSize(32, 32); // run the kernel nonBoundaryDifferentialCalculation<<< gridSize, blockSize >>>( d_arrayofArrayV, d_arrayofArrayEr, d_arrayofArrayEz, d_arrayofArrayEphi, rows, columns, phislices, symmetry ); error = cudaGetLastError(); if ( error != cudaSuccess ) { std::cout << "CUDA kernel run error: " << cudaGetErrorString(error) << '\n'; } // copy result from device to host cudaMemcpy( arrayofArrayEr, d_arrayofArrayEr, rows * columns * phislices * sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( arrayofArrayEz, d_arrayofArrayEz, rows * columns * phislices * sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( arrayofArrayEphi, d_arrayofArrayEphi, rows * columns * phislices * sizeof(float), cudaMemcpyDeviceToHost ); error = cudaGetLastError(); if ( error != cudaSuccess ) { std::cout << "CUDA memory copy device to host error: " << cudaGetErrorString(error) << '\n'; } // free device memory cudaFree( d_arrayofArrayV ); cudaFree( d_arrayofArrayEr ); cudaFree( d_arrayofArrayEz ); cudaFree( d_arrayofArrayEphi ); error = cudaGetLastError(); if ( error != cudaSuccess ) { std::cout << "CUDA free allocated memory error: " << cudaGetErrorString(error) << '\n'; } } void boundaryDifferentialCalculationCPU ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const int symmetry, const float fgkIFCRadius, const float fgkOFCRadius, const float fgkTPCZ0 ) { const float gridSizeR = (fgkOFCRadius - fgkIFCRadius) / (rows - 1); const float gridSizeZ = fgkTPCZ0 / (columns - 1); const float gridSizePhi = M_PI * 2 / phislices; // TwoPi() / phislices; float radius; int mplus, mminus, signplus, signminus; for (int m = 0; m < phislices; m++) { mplus = m + 1; mminus = m - 1; signplus = 1; signminus = 1; // Reflection symmetry in phi (e.g. symmetry at sector boundaries, or half sectors, etc.) 
if (symmetry == 1) { if (mplus > phislices - 1) { mplus = phislices - 2; } if (mminus < 0) { mminus = 1; } } // Anti-symmetry in phi else if (symmetry == -1) { if (mplus > phislices - 1 ) { mplus = phislices - 2; signplus = -1; } if (mminus < 0) { mminus = 1; signminus = -1; } } // No Symmetries in phi, no boundaries, the calculations is continuous across all phi else { if (mplus > phislices - 1) { mplus = m + 1 - phislices; } if (mminus < 0) { mminus = m - 1 + phislices; } } // calculate boundary r for (int j = 0; j < columns; j++) { // forward difference arrayofArrayEr[m * rows * columns + 0 * columns + j] = -1 * (-0.5 * arrayofArrayV[m * rows * columns + 2 * columns + j] + 2.0 * arrayofArrayV[m * rows * columns + 1 * columns + j] - 1.5 * arrayofArrayV[m * rows * columns + 0 * columns + j]) / gridSizeR; // backward difference arrayofArrayEr[m * rows * columns + (rows - 1) * columns + j] = -1 * (1.5 * arrayofArrayV[m * rows * columns + (rows - 1) * columns + j] - 2.0 * arrayofArrayV[m * rows * columns + (rows - 2) * columns + j] + 0.5 * arrayofArrayV[m * rows * columns + (rows - 3) * columns + j]) / gridSizeR; } for (int i = 0; i < rows; i += rows - 1) { radius = fgkIFCRadius + i * gridSizeR; for (int j = 1; j < columns - 1; j++) { // z direction arrayofArrayEz[m * rows * columns + i * columns + j] = -1 * (arrayofArrayV[m * rows * columns + i * columns + (j + 1)] - arrayofArrayV[m * rows * columns + i * columns + (j - 1)]) / (2 * gridSizeZ); // phi direction arrayofArrayEphi[m * rows * columns + i * columns + j] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + i * columns + j] - signminus * arrayofArrayV[mminus * rows * columns + i * columns + j]) / (2 * radius * gridSizePhi); } } // calculate boundary z for (int i = 0; i < rows; i++) { arrayofArrayEz[m * rows * columns + i * columns + 0] = -1 * (-0.5 * arrayofArrayV[m * rows * columns + i * columns + 2] + 2.0 * arrayofArrayV[m * rows * columns + i * columns + 1] - 1.5 * arrayofArrayV[m * rows * columns + i * columns + 0]) / gridSizeZ; arrayofArrayEz[m * rows * columns + i * columns + (columns - 1)] = -1 * (1.5 * arrayofArrayV[m * rows * columns + i * columns + (columns - 1)] - 2.0 * arrayofArrayV[m * rows * columns + i * columns + (columns - 2)] + 0.5 * arrayofArrayV[m * rows * columns + i * columns + (columns - 3)]) / gridSizeZ; } for (int i = 1; i < rows - 1; i++) { radius = fgkIFCRadius + i * gridSizeR; for (int j = 0; j < columns; j += columns - 1) { // r direction arrayofArrayEr[m * rows * columns + i * columns + j] = -1 * (arrayofArrayV[m * rows * columns + (i + 1) * columns + j] - arrayofArrayV[m * rows * columns + (i - 1) * columns + j]) / (2 * gridSizeR); // phi direction arrayofArrayEphi[m * rows * columns + i * columns + j] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + i * columns + j] - signminus * arrayofArrayV[mminus * rows * columns + i * columns + j]) / (2 * radius * gridSizePhi); } } // calculate corner points for Ephi for ( int i = 0; i < rows; i += rows - 1) { radius = fgkIFCRadius + i * gridSizeR; for (int j = 0; j < columns; j += columns - 1) { // phi didrection arrayofArrayEphi[m * rows * columns + i * columns + j] = -1 * (signplus * arrayofArrayV[mplus * rows * columns + i * columns + j] - signminus * arrayofArrayV[mminus * rows * columns + i * columns + j]) / (2 * radius * gridSizePhi); } } } } extern "C" void DifferentialCalculationGPU ( float *arrayofArrayV, float *arrayofArrayEr, float *arrayofArrayEz, float *arrayofArrayEphi, const int rows, const int columns, const int phislices, const 
int symmetry, const float fgkIFCRadius, const float fgkOFCRadius, const float fgkTPCZ0 ) { nonBoundaryDifferentialCalculationGPU(arrayofArrayV, arrayofArrayEr, arrayofArrayEz, arrayofArrayEphi, rows, columns, phislices, symmetry, fgkIFCRadius, fgkOFCRadius, fgkTPCZ0); boundaryDifferentialCalculationCPU(arrayofArrayV, arrayofArrayEr, arrayofArrayEz, arrayofArrayEphi, rows, columns, phislices, symmetry, fgkIFCRadius, fgkOFCRadius, fgkTPCZ0); }
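The non-boundary kernel above walks a grid stored as [phi][r][z] (phislices x rows x columns) and applies central differences away from the edges. Below is a minimal standalone sketch of that pattern, assuming the same row-major layout but using a generic flat-index decomposition rather than the kernel's own index_y arithmetic; the grid sizes, names, and toy potential are illustrative only, not taken from the file above.

// Minimal sketch (not the project's kernel): map a flat thread id to (iphi, ir, iz)
// for a grid stored as V[iphi][ir][iz], then take the central difference in r.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void centralDiffR(const float *V, float *Er,
                             int rows, int columns, int phislices,
                             float gridSizeR) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int total = rows * columns * phislices;
    if (idx >= total) return;

    // generic decomposition of the flat index for a [phi][r][z] layout
    int iphi = idx / (rows * columns);
    int ir   = (idx / columns) % rows;
    int iz   = idx % columns;

    // interior points only: E_r = -dV/dr by second-order central difference
    if (ir > 0 && ir < rows - 1) {
        int up   = (iphi * rows + (ir + 1)) * columns + iz;
        int down = (iphi * rows + (ir - 1)) * columns + iz;
        Er[idx] = -(V[up] - V[down]) / (2.0f * gridSizeR);
    } else {
        Er[idx] = 0.0f;   // boundary rows handled elsewhere (e.g. one-sided stencils)
    }
}

int main() {
    const int rows = 4, columns = 4, phislices = 2;
    const int n = rows * columns * phislices;
    float hV[n], hEr[n];
    for (int i = 0; i < n; ++i) hV[i] = (float)i;   // toy potential: a linear ramp

    float *dV, *dEr;
    cudaMalloc(&dV, n * sizeof(float));
    cudaMalloc(&dEr, n * sizeof(float));
    cudaMemcpy(dV, hV, n * sizeof(float), cudaMemcpyHostToDevice);

    centralDiffR<<<(n + 255) / 256, 256>>>(dV, dEr, rows, columns, phislices, 1.0f);
    cudaMemcpy(hEr, dEr, n * sizeof(float), cudaMemcpyDeviceToHost);

    // for this ramp the r-neighbours differ by 2*columns, so the value is -columns = -4
    printf("Er at (phi=0, r=1, z=1) = %f\n", hEr[1 * columns + 1]);

    cudaFree(dV); cudaFree(dEr);
    return 0;
}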
a25fc2248de396d1ecc38a962d24e1732ee05244.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> /* Application adds two vectors declared in the code */ __global__ void vecAdd(int* a, int* b , int* c, int size){ // calculate thread id int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < size){ c[id] = a[id] + b[id]; } } void printVector(int* vec, int size){ printf("[%d", vec[0]); for(int i=1;i<size; i++){ printf(", %d", vec[i]); } printf("]\n"); } int main(int argc, char**argv){ int size = 5; size_t vectorSize = size * sizeof(int); //initialize host variables int* h_vecA = (int*) malloc(vectorSize); int* h_vecB = (int*) malloc(vectorSize); int* h_vecResult = (int*) malloc(vectorSize); for(int i = 0; i < size; i++){ h_vecA[i] = i; h_vecB[i] = i*i; } // initialize device variables int * d_vecA, *d_vecB, *d_vecResult; hipMalloc(&d_vecA, vectorSize); hipMalloc(&d_vecB, vectorSize); hipMalloc(&d_vecResult, vectorSize); hipMemcpy(d_vecA, h_vecA, vectorSize, hipMemcpyHostToDevice); hipMemcpy(d_vecB, h_vecB, vectorSize, hipMemcpyHostToDevice); dim3 blocksPerGrid(1, 1, 1); dim3 threadsPerBlock(size, 1, 1); hipLaunchKernelGGL(( vecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vecA, d_vecB, d_vecResult, size); // copy the result to the device hipMemcpy(h_vecResult, d_vecResult, vectorSize, hipMemcpyDeviceToHost); printf("The result: \n"); printVector(h_vecResult, size); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) ); exit(-1); } return 0; }
a25fc2248de396d1ecc38a962d24e1732ee05244.cu
#include <stdio.h> #include <cuda_runtime.h> /* Application adds two vectors declared in the code */ __global__ void vecAdd(int* a, int* b , int* c, int size){ // calculate thread id int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < size){ c[id] = a[id] + b[id]; } } void printVector(int* vec, int size){ printf("[%d", vec[0]); for(int i=1;i<size; i++){ printf(", %d", vec[i]); } printf("]\n"); } int main(int argc, char**argv){ int size = 5; size_t vectorSize = size * sizeof(int); //initialize host variables int* h_vecA = (int*) malloc(vectorSize); int* h_vecB = (int*) malloc(vectorSize); int* h_vecResult = (int*) malloc(vectorSize); for(int i = 0; i < size; i++){ h_vecA[i] = i; h_vecB[i] = i*i; } // initialize device variables int * d_vecA, *d_vecB, *d_vecResult; cudaMalloc(&d_vecA, vectorSize); cudaMalloc(&d_vecB, vectorSize); cudaMalloc(&d_vecResult, vectorSize); cudaMemcpy(d_vecA, h_vecA, vectorSize, cudaMemcpyHostToDevice); cudaMemcpy(d_vecB, h_vecB, vectorSize, cudaMemcpyHostToDevice); dim3 blocksPerGrid(1, 1, 1); dim3 threadsPerBlock(size, 1, 1); vecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_vecA, d_vecB, d_vecResult, size); // copy the result to the device cudaMemcpy(h_vecResult, d_vecResult, vectorSize, cudaMemcpyDeviceToHost); printf("The result: \n"); printVector(h_vecResult, size); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) ); exit(-1); } return 0; }
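The vector-add pair above launches a single block of size threads, which only covers vectors no longer than the device's per-block thread limit. A common generalization, sketched here with illustrative names rather than taken from the files, derives the block count from the length and keeps the in-kernel bounds check:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void vecAddN(const int *a, const int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];       // bounds check still required
}

int main() {
    const int n = 1 << 20;               // far larger than any single block
    size_t bytes = n * sizeof(int);

    int *ha = (int*)malloc(bytes), *hb = (int*)malloc(bytes), *hc = (int*)malloc(bytes);
    for (int i = 0; i < n; ++i) { ha[i] = i; hb[i] = 2 * i; }

    int *da, *db, *dc;
    cudaMalloc(&da, bytes); cudaMalloc(&db, bytes); cudaMalloc(&dc, bytes);
    cudaMemcpy(da, ha, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, bytes, cudaMemcpyHostToDevice);

    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;   // ceil(n / threads)
    vecAddN<<<blocks, threads>>>(da, db, dc, n);

    cudaMemcpy(hc, dc, bytes, cudaMemcpyDeviceToHost);
    printf("c[12345] = %d (expected %d)\n", hc[12345], 3 * 12345);

    cudaFree(da); cudaFree(db); cudaFree(dc);
    free(ha); free(hb); free(hc);
    return 0;
}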
d13120ca181fc18c90a39d9c8c278e88cc07aff6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 28.11.2018 // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> __global__ void bitonicArbitraryStepKernelValue(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<X*>(vx); auto y = static_cast<Y*>(vy); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, yShapeInfo, xLength); int posIJ = shape::getIndexOffset(ij, yShapeInfo, xLength); Y v0 = y[posIJ]; Y v1 = y[posIT]; if(!descending == (v0 > v1)) { y[posIJ] = v1; y[posIT] = v0; X xtemp = x[posIJ]; x[posIJ] = x[posIT]; x[posIT] = xtemp; } } } } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> __global__ void bitonicArbitraryStepKernelKey(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<X*>(vx); auto y = static_cast<Y*>(vy); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo, xLength); int posIJ = shape::getIndexOffset(ij, xShapeInfo, xLength); X v0 = x[posIJ]; X v1 = x[posIT]; if(!descending == (v0 > v1)) { x[posIJ] = v1; x[posIT] = v0; Y ytemp = y[posIJ]; y[posIJ] = y[posIT]; y[posIT] = ytemp; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execBitonicArbitraryStepKernel(void *vx, Nd4jLong *xShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<T*>(vx); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ T *shmem; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo, xLength); int posIJ = shape::getIndexOffset(ij, xShapeInfo, xLength); shmem[threadIdx.x] = x[posIJ]; shmem[threadIdx.x + blockDim.x] = x[posIT]; if(!descending == (shmem[threadIdx.x] > shmem[threadIdx.x + blockDim.x])) { x[posIJ] = shmem[threadIdx.x + blockDim.x]; x[posIT] = shmem[threadIdx.x]; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __host__ void bitonicArbitraryStepGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int window, int length, int reverse, bool descending) { hipLaunchKernelGGL(( execBitonicArbitraryStepKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, window, length, reverse, descending); } template <typename X, typename Y> __host__ void bitonicArbitraryStepGenericKey(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { hipLaunchKernelGGL(( bitonicArbitraryStepKernelKey<X,Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, window, length, reverse, descending); } template <typename X, typename Y> __host__ void bitonicArbitraryStepGenericValue(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { hipLaunchKernelGGL(( bitonicArbitraryStepKernelValue<X,Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, window, length, reverse, descending); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT bitonicArbitraryStepGeneric, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES); BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicArbitraryStepGenericKey, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES, LIBND4J_TYPES); BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicArbitraryStepGenericValue, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
d13120ca181fc18c90a39d9c8c278e88cc07aff6.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 28.11.2018 // #include <ops/specials_cuda.h> ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> __global__ void bitonicArbitraryStepKernelValue(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<X*>(vx); auto y = static_cast<Y*>(vy); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, yShapeInfo, xLength); int posIJ = shape::getIndexOffset(ij, yShapeInfo, xLength); Y v0 = y[posIJ]; Y v1 = y[posIT]; if(!descending == (v0 > v1)) { y[posIJ] = v1; y[posIT] = v0; X xtemp = x[posIJ]; x[posIJ] = x[posIT]; x[posIT] = xtemp; } } } } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Y> __global__ void bitonicArbitraryStepKernelKey(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<X*>(vx); auto y = static_cast<Y*>(vy); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo, xLength); int posIJ = shape::getIndexOffset(ij, xShapeInfo, xLength); X v0 = x[posIJ]; X v1 = x[posIT]; if(!descending == (v0 > v1)) { x[posIJ] = v1; x[posIT] = v0; Y ytemp = y[posIJ]; y[posIJ] = y[posIT]; y[posIT] = ytemp; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execBitonicArbitraryStepKernel(void *vx, Nd4jLong *xShapeInfo, int window, int length, int reverse, bool descending) { auto x = static_cast<T*>(vx); int tid = threadIdx.x + blockDim.x * blockIdx.x; int half = window>>1; __shared__ T *shmem; __shared__ Nd4jLong xLength; if (threadIdx.x == 0) { extern __shared__ unsigned char shrd[]; shmem = (T *) shrd; xLength = shape::length(xShapeInfo); } __syncthreads(); //for (int i = 0; i < length; i+= window) /* if window == 4; iterations will be: 0; 4; 8; 12; 16; 20 if gridDim = 3; on first iteration we'll have: 0; 4; 8; on second iteration we'll have: 0 + (3 * 4) = 12; 4 + (3 * 4) = 16; 8 + (3 * 4) = 20 */ int firstPosition; int firstStep; int secondPosition; int secondStep; int WARP_SIZE = 32; int numWarps = (gridDim.x * blockDim.x) / 32; int warpId = tid / WARP_SIZE; int warpIdx = tid % WARP_SIZE; if (half >= 128) { firstPosition = blockIdx.x * window; firstStep = gridDim.x * window; secondPosition = threadIdx.x; secondStep = blockDim.x; } else if (half >= 32) { firstPosition = warpId * window; firstStep = numWarps * window; secondPosition = warpIdx; secondStep = WARP_SIZE; } else { firstPosition = tid * window; firstStep = blockDim.x * gridDim.x * window; secondPosition = 0; secondStep = 1; } for (int i = firstPosition; i < length; i += firstStep) { for (int j = secondPosition; j < half; j += secondStep) { int it = (reverse) ? 
i + j + half : i + window - j - 1; int ij = i+j; if (it < length && ij < length ) { int posIT = shape::getIndexOffset(it, xShapeInfo, xLength); int posIJ = shape::getIndexOffset(ij, xShapeInfo, xLength); shmem[threadIdx.x] = x[posIJ]; shmem[threadIdx.x + blockDim.x] = x[posIT]; if(!descending == (shmem[threadIdx.x] > shmem[threadIdx.x + blockDim.x])) { x[posIJ] = shmem[threadIdx.x + blockDim.x]; x[posIT] = shmem[threadIdx.x]; } } } } } ////////////////////////////////////////////////////////////////////////// template<typename T> __host__ void bitonicArbitraryStepGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int window, int length, int reverse, bool descending) { execBitonicArbitraryStepKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, window, length, reverse, descending); } template <typename X, typename Y> __host__ void bitonicArbitraryStepGenericKey(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { bitonicArbitraryStepKernelKey<X,Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, window, length, reverse, descending); } template <typename X, typename Y> __host__ void bitonicArbitraryStepGenericValue(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending) { bitonicArbitraryStepKernelValue<X,Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, window, length, reverse, descending); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT bitonicArbitraryStepGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES); BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicArbitraryStepGenericKey, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES, LIBND4J_TYPES); BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicArbitraryStepGenericValue, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int window, int length, int reverse, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
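Each launch of the kernels above performs one compare-exchange pass: with reverse == 0 the pairs inside a window are mirrored, (i+j, i+window-j-1), and with reverse == 1 they are linear, (i+j, i+j+half). Below is a host-side sketch of how those two pairing rules compose into a full ascending bitonic sort, assuming a power-of-two length; it illustrates the network only and is not the library's actual host driver.

// Host illustration of the two compare-exchange patterns used above: the
// "mirrored" pairing opens each merge level, the "linear" pairing finishes it.
#include <cstdio>
#include <utility>

void bitonicSortHost(float *x, int n) {              // n must be a power of two
    for (int size = 2; size <= n; size <<= 1) {
        // first pass at this level: mirrored pairs, like reverse == 0 above
        for (int i = 0; i < n; i += size)
            for (int j = 0; j < size / 2; ++j)
                if (x[i + j] > x[i + size - 1 - j])
                    std::swap(x[i + j], x[i + size - 1 - j]);
        // remaining passes: linear pairs with shrinking window, like reverse == 1
        for (int window = size / 2; window >= 2; window >>= 1) {
            int half = window / 2;
            for (int i = 0; i < n; i += window)
                for (int j = 0; j < half; ++j)
                    if (x[i + j] > x[i + j + half])
                        std::swap(x[i + j], x[i + j + half]);
        }
    }
}

int main() {
    float v[8] = {5, 7, 1, 3, 8, 2, 6, 4};
    bitonicSortHost(v, 8);
    for (int i = 0; i < 8; ++i) printf("%g ", v[i]);   // prints 1 2 3 4 5 6 7 8
    printf("\n");
    return 0;
}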
216765f84972209da50c409fbb3f1594fc717f25.hip
// !!! This is a file automatically generated by hipify!!! #include "math_function.h" #include <hipsparse.h> #include <rocblas.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cudnn.h> #include <cmath> #include <cstdlib> #include <cstring> #include <stdio.h> #include <sys/time.h> #include "common.h" // for check utils #include "common_cuda.h" float cudnnConvOpt(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float total_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; cudnnHandle_t handle = 0; CUDNN_CHECK(cudnnCreate(&handle)); cudnnDataType_t float_type = CUDNN_DATA_FLOAT; // workspace size_t workspace_fwd_size = 0; void *workspaceData = NULL; // underlying storage // algorithms for forward convolutions cudnnConvolutionFwdAlgo_t fwd_algo; // descriptors cudnnTensorDescriptor_t bottom_desc, top_desc; cudnnFilterDescriptor_t filter_desc; cudnnConvolutionDescriptor_t conv_desc; // Create filter descriptor CUDNN_CHECK(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(filter_desc, float_type, CUDNN_TENSOR_NCHW, out_channels, in_channels, kernel_size, kernel_size)); // Create tensor descriptor for data and convolutions CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); // initializes the previously created generic Tensor descriptor object into a // 4D tensor CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(bottom_desc, float_type, batch_size, in_channels, in_height, in_width, in_channels * in_height * in_width, in_height * in_width, in_width, 1)); CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(top_desc, float_type, batch_size, out_channels, out_height, out_width, out_channels * out_height * out_width, out_height * out_width, out_width, 1)); CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1, CUDNN_CROSS_CORRELATION, float_type)); // CUDNN_CONVOLUTION, float_type)); // Now try to start the cuDNN process size_t workspace_limit_bytes, total_memory; CUDA_CHECK(hipMemGetInfo(&workspace_limit_bytes, &total_memory)); int returned_algo_cnt = 0; cudnnConvolutionFwdAlgoPerf_t fwd_algo_perf; CUDNN_CHECK(cudnnFindConvolutionForwardAlgorithm(handle, bottom_desc, filter_desc, conv_desc, top_desc, 1, &returned_algo_cnt, &fwd_algo_perf)); LOG("fwd_algo: %d", fwd_algo_perf.algo); CUDA_CHECK(hipMalloc((void **)&workspaceData, fwd_algo_perf.memory)); float oneval = 1.0, zeroval = 0.0; void *one = (void *)&oneval; void *zero = (void *)&zeroval; hipDeviceSynchronize(); hipEventRecord(start, 0); CUDNN_CHECK(cudnnConvolutionForward(handle, one, bottom_desc, A, filter_desc, B, conv_desc, //fwd_algo, fwd_algo_perf.algo, workspaceData, //workspace_fwd_size, fwd_algo_perf.memory, zero, top_desc, C)); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); LOG("cuDNN convolution with NCHW format Time: %f ms", total_time); CUDA_CHECK(hipFree(workspaceData)); return total_time; } float cudnnConv(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { hipEvent_t start, stop; 
hipEventCreate(&start); hipEventCreate(&stop); float total_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; cudnnHandle_t handle = 0; CUDNN_CHECK(cudnnCreate(&handle)); cudnnDataType_t float_type = CUDNN_DATA_FLOAT; // workspace size_t workspace_fwd_size = 0; void *workspaceData = NULL; // underlying storage // algorithms for forward convolutions cudnnConvolutionFwdAlgo_t fwd_algo; // descriptors cudnnTensorDescriptor_t bottom_desc, top_desc; cudnnFilterDescriptor_t filter_desc; cudnnConvolutionDescriptor_t conv_desc; // Create filter descriptor CUDNN_CHECK(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(filter_desc, float_type, CUDNN_TENSOR_NCHW, out_channels, in_channels, kernel_size, kernel_size)); // Create tensor descriptor for data and convolutions CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); // initializes the previously created generic Tensor descriptor object into a // 4D tensor CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(bottom_desc, float_type, batch_size, in_channels, in_height, in_width, in_channels * in_height * in_width, in_height * in_width, in_width, 1)); CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(top_desc, float_type, batch_size, out_channels, out_height, out_width, out_channels * out_height * out_width, out_height * out_width, out_width, 1)); CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1, CUDNN_CROSS_CORRELATION, float_type)); // CUDNN_CONVOLUTION, float_type)); // Now try to start the cuDNN process size_t workspace_limit_bytes, total_memory; CUDA_CHECK(hipMemGetInfo(&workspace_limit_bytes, &total_memory)); int returned_algo_cnt = 0; cudnnConvolutionFwdAlgoPerf_t fwd_algo_perf; CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm_v7(handle, bottom_desc, filter_desc, conv_desc, top_desc, 1, &returned_algo_cnt, &fwd_algo_perf)); LOG("fwd_algo: %d", fwd_algo_perf.algo); // allocate workspace CUDA_CHECK(hipMalloc((void **)&workspaceData, fwd_algo_perf.memory)); float oneval = 1.0, zeroval = 0.0; void *one = (void *)&oneval; void *zero = (void *)&zeroval; hipDeviceSynchronize(); hipEventRecord(start, 0); CUDNN_CHECK(cudnnConvolutionForward(handle, one, bottom_desc, A, filter_desc, B, conv_desc, fwd_algo_perf.algo, workspaceData, fwd_algo_perf.memory, zero, top_desc, C)); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); LOG("cuDNN convolution with NCHW format Time: %f ms", total_time); CUDA_CHECK(hipFree(workspaceData)); return total_time; } float cudnnConv_algo(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad, int cudnn_algo) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float total_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; // cudnn_handle; cudnnHandle_t handle = 0; CUDNN_CHECK(cudnnCreate(&handle)); cudnnDataType_t float_type = CUDNN_DATA_FLOAT; // workspace size_t workspace_fwd_size = 0; void *workspaceData = NULL; // underlying storage // algorithms for forward convolutions cudnnConvolutionFwdAlgo_t fwd_algo; // descriptors cudnnTensorDescriptor_t bottom_desc, top_desc; cudnnFilterDescriptor_t 
filter_desc; cudnnConvolutionDescriptor_t conv_desc; // Create filter descriptor CUDNN_CHECK(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(filter_desc, float_type, CUDNN_TENSOR_NCHW, out_channels, in_channels, kernel_size, kernel_size)); // Create tensor descriptor for data and convolutions CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); // initializes the previously created generic Tensor descriptor object into a // 4D tensor CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(bottom_desc, float_type, batch_size, in_channels, in_height, in_width, in_channels * in_height * in_width, in_height * in_width, in_width, 1)); CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(top_desc, float_type, batch_size, out_channels, out_height, out_width, out_channels * out_height * out_width, out_height * out_width, out_width, 1)); CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1, CUDNN_CROSS_CORRELATION, float_type // CUDNN_CONVOLUTION, float_type )); fwd_algo = (cudnnConvolutionFwdAlgo_t)cudnn_algo; LOG("fwd_algo: %d", fwd_algo); // get workspace size CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle, bottom_desc, filter_desc, conv_desc, top_desc, fwd_algo, &workspace_fwd_size)); // allocate workspace CUDA_CHECK(hipMalloc((void **)&workspaceData, workspace_fwd_size)); float oneval = 1.0, zeroval = 0.0; void *one = (void *)&oneval; void *zero = (void *)&zeroval; hipDeviceSynchronize(); hipEventRecord(start, 0); CUDNN_CHECK(cudnnConvolutionForward(handle, one, bottom_desc, A, filter_desc, B, conv_desc, fwd_algo, workspaceData, workspace_fwd_size, zero, top_desc, C)); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); LOG("cuDNN convolution with NCHW format Time: %f ms", total_time); CUDA_CHECK(hipFree(workspaceData)); return total_time; } float cublasConv(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { float total_time = 0.0; // struct timeval start, stop; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; // For each image in the batch, do convolution separately float *input = NULL; float *weights = B; float *output = NULL; float *unroll_buff = NULL; // column buffer float *unroll_matrix = NULL; size_t unroll_matrix_size = sizeof(float) * batch_size * in_channels * kernel_size * kernel_size * out_width * out_height; CUDA_CHECK(hipMalloc((void **)&unroll_matrix, unroll_matrix_size)); unroll_buff = A; if (kernel_size > 1) { total_time += im2col_gpu_batch(A, in_channels, in_height, in_width, kernel_size, kernel_size, pad, pad, stride, stride, unroll_matrix, batch_size); unroll_buff = unroll_matrix; } // M = output_channels, N = output_h * output_w * batch_size // K = input_channels * kernel_size * kernel_size total_time += cublasGEMM(false, false, weights, unroll_buff, C, out_channels, out_height * out_width * batch_size, in_channels * kernel_size * kernel_size); LOG("cublasConv total Time: %f ms", total_time); CUDA_CHECK(hipFree(unroll_matrix)); return total_time; } __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int 
stride_w, const int height_col, const int width_col, float* data_col) { // output also store in input_channels * r * s * (output_h * output_w) format // sequentially pull out index CUDA_KERNEL_LOOP(index, n) { int w_out = index % width_col; // which output column int h_index = index / width_col; int h_out = h_index % height_col; // which output row int channel_in = h_index / height_col; // which channel int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_h; // the start of the input region int w_in = w_out * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : (float)(0); data_col_ptr += height_col * width_col; } } } } float im2col_gpu(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_col) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float total_time = 0.0; // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; hipDeviceSynchronize(); hipEventRecord(start, 0); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel), dim3(EXP_GET_BLOCKS(num_kernels)), dim3(EXP_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); LOG("im2col_gpu total Time: %f ms", total_time); return total_time; } __global__ void batch_im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int batch, const int channels, const int height_col, const int width_col, float* data_col) { // output also store in input_channels * r * s * (output_h * output_w) format // sequentially pull out index CUDA_KERNEL_LOOP(id, n) { int index = id % (channels * height_col * width_col); int batch_id = id / (channels * height_col * width_col); int w_out = index % width_col; // which output column int h_index = index / width_col; int h_out = h_index % height_col; // which output row int channel_in = h_index / height_col; // which channel int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_h; // the start of the input region int w_in = w_out * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += ((channel_out * batch + batch_id) * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += ((batch_id * channels + channel_in) * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? 
data_im_ptr[i * width + j] : (float)(0); data_col_ptr += height_col * width_col * batch; } } } } float im2col_gpu_batch(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_col, const int batch) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float total_time = 0.0; // We are going to launch channels * height_col * width_col * batch kernels, // each kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; num_kernels *= batch; hipDeviceSynchronize(); hipEventRecord(start, 0); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( batch_im2col_gpu_kernel), dim3(EXP_GET_BLOCKS(num_kernels)), dim3(EXP_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, batch, channels, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); LOG("im2col_gpu_batch total Time: %f ms", total_time); return total_time; } // GEMM with cuBLAS // A size M*K, B size K*N // if M == 1(which means A is a vector), falls into GEMV float cublasGEMM(bool TransA, bool TransB, float *A, float *B, float *C, int M, int N, int K) { hipblasHandle_t handle = NULL; CUBLAS_CHECK(hipblasCreate(&handle)); const float alpha = 1.0; const float beta = 0.0; int lda = (TransA == false) ? K : M; int ldb = (TransB == false) ? N : K; hipblasOperation_t cuTransA = (TransA == false) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == false) ? HIPBLAS_OP_N : HIPBLAS_OP_T; // struct timeval start, stop; // Timer timer; float total_time; hipDeviceSynchronize(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // hipEventRecord(start, 0); if (M == 1) { LOG("USE hipblasSgemv!"); hipEventRecord(start, 0); // timer.start(); // wrong execution actually... CUBLAS_CHECK(hipblasSgemv(handle, cuTransB, K, // number of rows of matrix A, not op(A)! 
N, // N, // K, &alpha, B, ldb, A, 1, &beta, C, 1 )); hipDeviceSynchronize(); // timer.end(); hipEventRecord(stop, 0); hipEventSynchronize(stop); } else { hipEventRecord(start, 0); // timer.start(); // Note that cuBLAS use Fortran order (column-major) // But we use row-major, so we need to switch the A, B matrix CUBLAS_CHECK(hipblasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N )); hipDeviceSynchronize(); // timer.end(); hipEventRecord(stop, 0); hipEventSynchronize(stop); } // total_time = timer.duration_ms(); // LOG("cuBLAS Time: %f ms", total_time); hipEventElapsedTime(&total_time, start, stop); LOG("cuBLAS total Time by cudaEvent: %f ms", total_time); return total_time; } // Do convolution with cuSparse CSR format float csrConv(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { float total_time = 0.0; double im2col_time = 0.0, transpose_time = 0.0; double tmp_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; // For each image in the batch, do convolution separately float *input = NULL; float *weights = B; float *unroll_buff = NULL; // column buffer float *unroll_matrix = NULL; size_t unroll_matrix_size = sizeof(float) * batch_size * in_channels * kernel_size * kernel_size * out_width * out_height; CUDA_CHECK(hipMalloc((void **)&unroll_matrix, unroll_matrix_size)); unroll_buff = A; if (kernel_size > 1) { tmp_time = im2col_gpu_batch(A, in_channels, in_height, in_width, kernel_size, kernel_size, pad, pad, stride, stride, unroll_matrix, batch_size); unroll_buff = unroll_matrix; total_time += tmp_time; im2col_time += tmp_time; } // we need to manually transpose the output float *output_trans; CUDA_CHECK(hipMalloc((void **)&output_trans, sizeof(float) * batch_size * out_channels * out_height * out_width)); int N = out_height * out_width * batch_size; int M = out_channels; int K = in_channels * kernel_size * kernel_size; total_time += csrGEMM2(true, true, unroll_buff, weights, output_trans, N, M, K); tmp_time = cublas_transpose(output_trans, C, out_height * out_width * batch_size, out_channels); transpose_time += tmp_time; LOG("csrConv im2col Time: %f, transpose time, %f, total Time: %f ms", im2col_time, transpose_time, total_time); CUDA_CHECK(hipFree(unroll_matrix)); CUDA_CHECK(hipFree(output_trans)); return total_time; } // GEMM with cuSparse CSR format for weights // hipsparseScsrmm // Bt * At = (AB)t // Remember that we have to manually transpose B... // csrGEMM2 support At and Bt together float csrGEMM2(bool TransA, bool TransB, float *A, float *B, float *C, int M, int N, int K) { hipsparseHandle_t handle = 0; CUSPARSE_CHECK(hipsparseCreate(&handle)); hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); int nnz; float *csrVal; int *csrRowPtr; int *csrColIndex; // Note the different of row-major (host) and column-major (device)! 
int B_row_dev = N; int B_col_dev = K; if (TransB) { float *Bt; CUDA_CHECK(hipMalloc((void**)&Bt, sizeof(float) * N * K)); cublas_transpose(B, Bt, N, K); // printf("Have transposed B...\n"); convert_dense2csr(K, N, Bt, (void **)&csrVal, (void **)&csrRowPtr, (void **)&csrColIndex, &nnz); CUDA_CHECK(hipFree(Bt)); } else { convert_dense2csr(K, N, B, (void **)&csrVal, (void **)&csrRowPtr, (void **)&csrColIndex, &nnz); } const float alpha = 1.0; const float beta = 0.0; int lda = (TransA == false) ? K : M; int A_col_dev = (TransA == false) ? M : K; int ldc = N; // int ldb = (TransB == false) ? N : K; hipsparseOperation_t cuTransA = (TransA == false) ? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE; hipsparseOperation_t cuTransB = HIPSPARSE_OPERATION_NON_TRANSPOSE; float total_time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); // struct timeval start, stop; // hipDeviceSynchronize(); if (M == 1) { LOG("USE csrSgemv!"); // gettimeofday(&start, NULL); hipEventRecord(start, 0); // wrong execution actually.... CUSPARSE_CHECK(hipsparseScsrmv(handle, cuTransB, B_row_dev, B_col_dev, nnz, &alpha, descr, csrVal, csrRowPtr, csrColIndex, A, &beta, C)); // hipDeviceSynchronize(); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); // hipEventElapsedTime(&total_time, start, stop); // gettimeofday(&stop, NULL); } else { // gettimeofday(&start, NULL); hipEventRecord(start, 0); // Note that cuBLAS use Fortran order (column-major) // But we use row-major, so we need to switch the A, B matrix CUSPARSE_CHECK(hipsparseScsrmm2(handle, cuTransB, cuTransA, B_row_dev, M, B_col_dev, nnz, &alpha, descr, csrVal, csrRowPtr, csrColIndex, A, lda, &beta, C, ldc)); // hipDeviceSynchronize(); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); // gettimeofday(&stop, NULL); } hipEventElapsedTime(&total_time, start, stop); // // total_time = (stop.tv_sec - start.tv_sec) * 1000.0 + // (stop.tv_usec - start.tv_usec) / 1000.0; LOG("cuSparse csrMM2 Time: %f ms", total_time); CUDA_CHECK(hipFree(csrVal)); CUDA_CHECK(hipFree(csrRowPtr)); CUDA_CHECK(hipFree(csrColIndex)); return total_time; } // transpose a matrix // in_M and in_N are the input M, N float cublas_transpose(const float *input, float *output, int in_M, int in_N) { const float alpha = 1.0; const float beta = 0.0; hipEvent_t start, stop; float total_time; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start, 0); // cublas use column major // before trans: M * N (row-major on Host) / N * M (column-majore) // after trans: N * M on Host / M * N on device hipblasHandle_t handle = NULL; CUBLAS_CHECK(hipblasCreate(&handle)); CUBLAS_CHECK(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, in_M, in_N, &alpha, input, in_N, &beta, input, in_N, output, in_M )); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&total_time, start, stop); return total_time; } // convert a matrix A into CSR format // M, N are rows and columns on Host row-major! 
// as a M*N row-major matrix is equivelent to a N*M column-major matrix, // and the row-major matrix in CSR format is the same to the column-major matrix // in CSC format, // we treat the input M*N row-major matrix as N*M column-major matrix, // use cusparse to compute this columnn-major matrix into CSC, and return it // as a row-major CSR void convert_dense2csr(const int M, const int N, const float *A, void **csrVal, void **csrRowPtr, void **csrColIndex, int *nnz) { hipsparseHandle_t handle = 0; CUSPARSE_CHECK(hipsparseCreate(&handle)); hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); // Note that cusparse follows Fortran order (column-major) // So rows = N, columns = M int *nnzPerRowColumn; int lda = N; CUDA_CHECK(hipMalloc((void**)&nnzPerRowColumn, sizeof(int) * M)); hipDeviceSynchronize(); CUSPARSE_CHECK(hipsparseSnnz(handle, HIPSPARSE_DIRECTION_COLUMN, N, M, descr, A, lda, nnzPerRowColumn, nnz)); hipDeviceSynchronize(); CUDA_CHECK(hipMalloc(csrVal, sizeof(float) * (*nnz))); CUDA_CHECK(hipMalloc(csrRowPtr, sizeof(int) * (M + 1))); CUDA_CHECK(hipMalloc(csrColIndex, sizeof(int) * (*nnz))); // convert to CSC format with column major, which is equivelant to CSR format // with row major CUSPARSE_CHECK(hipsparseSdense2csc(handle, N, M, descr, A, lda, nnzPerRowColumn, (float*)*csrVal, (int*)*csrColIndex, (int*)*csrRowPtr)); hipDeviceSynchronize(); CUDA_CHECK(hipFree(nnzPerRowColumn)); return; } float host_cublasGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { float *d_A = NULL, *d_x = NULL, *d_y = NULL; int size_A = M * N; int size_x = (TransA == false) ? N : M; int size_y = (TransA == false) ? M : N; CUDA_CHECK(hipMalloc( (void**)&d_A, size_A * sizeof(d_A[0]) )); CUDA_CHECK(hipMalloc( (void**)&d_x, size_x * sizeof(d_x[0]) )); CUDA_CHECK(hipMalloc( (void**)&d_y, size_y * sizeof(d_y[0]) )); CUDA_CHECK(hipMemcpy( d_A, A, (size_t)(size_A * sizeof(d_A[0])), hipMemcpyHostToDevice )); CUDA_CHECK(hipMemcpy( d_x, x, (size_t)(size_x * sizeof(d_x[0])), hipMemcpyHostToDevice )); CUDA_CHECK(hipMemcpy( d_y, y, (size_t)(size_y * sizeof(d_y[0])), hipMemcpyHostToDevice )); float time = cublasGEMV(TransA, d_A, d_x, d_y, M, N); CUDA_CHECK(hipMemcpy( y, d_y, (size_t)(size_y * sizeof(d_y[0])), hipMemcpyDeviceToHost )); CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_x)); CUDA_CHECK(hipFree(d_y)); return time; } float host_csrGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { float *d_A = NULL, *d_x = NULL, *d_y = NULL; int size_A = M * N; int size_x = (TransA == false) ? N : M; int size_y = (TransA == false) ? 
M : N; CUDA_CHECK(hipMalloc( (void**)&d_A, size_A * sizeof(d_A[0]) )); CUDA_CHECK(hipMalloc( (void**)&d_x, size_x * sizeof(d_x[0]) )); CUDA_CHECK(hipMalloc( (void**)&d_y, size_y * sizeof(d_y[0]) )); CUDA_CHECK(hipMemcpy( d_A, A, (size_t)(size_A * sizeof(d_A[0])), hipMemcpyHostToDevice )); CUDA_CHECK(hipMemcpy( d_x, x, (size_t)(size_x * sizeof(d_x[0])), hipMemcpyHostToDevice )); CUDA_CHECK(hipMemcpy( d_y, y, (size_t)(size_y * sizeof(d_y[0])), hipMemcpyHostToDevice )); float time = csrGEMV(TransA, d_A, d_x, d_y, M, N); CUDA_CHECK(hipMemcpy( y, d_y, (size_t)(size_y * sizeof(d_y[0])), hipMemcpyDeviceToHost )); CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_x)); CUDA_CHECK(hipFree(d_y)); return time; } // GEMV with cuBLAS // A size M*N, x size N*1, y size M*1 float cublasGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { hipblasHandle_t handle = NULL; CUBLAS_CHECK(hipblasCreate(&handle)); const float alpha = 1.0; const float beta = 0.0; // int lda = (TransA == false) ? M : N; hipblasOperation_t cuTransA = (TransA == false) ? HIPBLAS_OP_N : HIPBLAS_OP_T; // int row = (TransA == false) ? M : N; // int col = (TransA == false) ? N : M; // struct timeval start, stop; // Timer timer; float total_time; hipDeviceSynchronize(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // hipEventRecord(start, 0); hipEventRecord(start, 0); // timer.start(); // wrong execution actually... CUBLAS_CHECK(hipblasSgemv(handle, cuTransA, // row, // col, M, N, &alpha, A, // lda, M, x, 1, &beta, y, 1 )); hipDeviceSynchronize(); // timer.end(); hipEventRecord(stop, 0); hipEventSynchronize(stop); CUDA_POST_KERNEL_CHECK; // total_time = timer.duration_ms(); // LOG("cuBLAS Time: %f ms", total_time); hipEventElapsedTime(&total_time, start, stop); LOG("cuBLAS GEMV total Time by cudaEvent: %f ms", total_time); return total_time; } // GEMV with cuSparse CSR format for matrix float csrGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { hipsparseHandle_t handle = 0; CUSPARSE_CHECK(hipsparseCreate(&handle)); hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); int nnz; float *csrVal; int *csrRowPtr; int *csrColIndex; // Note the different of row-major (host) and column-major (device)! int A_row_dev = M; int A_col_dev = N; convert_dense2csr(M, N, A, (void **)&csrVal, (void **)&csrRowPtr, (void **)&csrColIndex, &nnz); const float alpha = 1.0; const float beta = 0.0; hipsparseOperation_t cuTransA = (TransA == false) ? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE; float total_time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); hipEventRecord(start, 0); CUSPARSE_CHECK(hipsparseScsrmv(handle, cuTransA, A_row_dev, A_col_dev, nnz, &alpha, descr, csrVal, csrRowPtr, csrColIndex, x, &beta, y)); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); CUDA_POST_KERNEL_CHECK; hipEventElapsedTime(&total_time, start, stop); LOG("cuSparse csrMV Time: %f ms", total_time); CUDA_CHECK(hipFree(csrVal)); CUDA_CHECK(hipFree(csrRowPtr)); CUDA_CHECK(hipFree(csrColIndex)); return total_time; }
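cublasGEMM and csrGEMM2 in the file above lean on the identity (A*B)^T = B^T * A^T to hand row-major buffers to column-major BLAS routines by swapping the operands. Below is a tiny standalone check of that operand-swapping trick, written against the CUDA cuBLAS API (the .cu side of the pair) with hard-coded 2x3 and 3x2 matrices; it is an illustration of the layout argument, not project code.

// Row-major C = A*B via column-major cublasSgemm: a row-major M x N buffer is
// the same memory as a column-major N x M matrix, so computing B*A in
// column-major terms leaves (A*B) laid out row-major in C.
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main() {
    const int M = 2, K = 3, N = 2;
    float hA[M * K] = {1, 2, 3,
                       4, 5, 6};          // row-major A (M x K)
    float hB[K * N] = {7,  8,
                       9, 10,
                      11, 12};            // row-major B (K x N)
    float hC[M * N] = {0};

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(hA)); cudaMalloc(&dB, sizeof(hB)); cudaMalloc(&dC, sizeof(hC));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // swapped operands: column-major (N x K) * (K x M) = (N x M), which is
    // byte-for-byte the row-major M x N product
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K,
                &alpha,
                dB, N,     // leading dimension of row-major B is N
                dA, K,     // leading dimension of row-major A is K
                &beta,
                dC, N);    // leading dimension of row-major C is N

    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("%g %g\n%g %g\n", hC[0], hC[1], hC[2], hC[3]);   // expect 58 64 / 139 154

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}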
216765f84972209da50c409fbb3f1594fc717f25.cu
#include "math_function.h" #include <cusparse.h> #include <cublas_v2.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <cudnn.h> #include <cmath> #include <cstdlib> #include <cstring> #include <stdio.h> #include <sys/time.h> #include "common.h" // for check utils #include "common_cuda.h" float cudnnConvOpt(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float total_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; cudnnHandle_t handle = 0; CUDNN_CHECK(cudnnCreate(&handle)); cudnnDataType_t float_type = CUDNN_DATA_FLOAT; // workspace size_t workspace_fwd_size = 0; void *workspaceData = NULL; // underlying storage // algorithms for forward convolutions cudnnConvolutionFwdAlgo_t fwd_algo; // descriptors cudnnTensorDescriptor_t bottom_desc, top_desc; cudnnFilterDescriptor_t filter_desc; cudnnConvolutionDescriptor_t conv_desc; // Create filter descriptor CUDNN_CHECK(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(filter_desc, float_type, CUDNN_TENSOR_NCHW, out_channels, in_channels, kernel_size, kernel_size)); // Create tensor descriptor for data and convolutions CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); // initializes the previously created generic Tensor descriptor object into a // 4D tensor CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(bottom_desc, float_type, batch_size, in_channels, in_height, in_width, in_channels * in_height * in_width, in_height * in_width, in_width, 1)); CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(top_desc, float_type, batch_size, out_channels, out_height, out_width, out_channels * out_height * out_width, out_height * out_width, out_width, 1)); CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1, CUDNN_CROSS_CORRELATION, float_type)); // CUDNN_CONVOLUTION, float_type)); // Now try to start the cuDNN process size_t workspace_limit_bytes, total_memory; CUDA_CHECK(cudaMemGetInfo(&workspace_limit_bytes, &total_memory)); int returned_algo_cnt = 0; cudnnConvolutionFwdAlgoPerf_t fwd_algo_perf; CUDNN_CHECK(cudnnFindConvolutionForwardAlgorithm(handle, bottom_desc, filter_desc, conv_desc, top_desc, 1, &returned_algo_cnt, &fwd_algo_perf)); LOG("fwd_algo: %d", fwd_algo_perf.algo); CUDA_CHECK(cudaMalloc((void **)&workspaceData, fwd_algo_perf.memory)); float oneval = 1.0, zeroval = 0.0; void *one = (void *)&oneval; void *zero = (void *)&zeroval; cudaDeviceSynchronize(); cudaEventRecord(start, 0); CUDNN_CHECK(cudnnConvolutionForward(handle, one, bottom_desc, A, filter_desc, B, conv_desc, //fwd_algo, fwd_algo_perf.algo, workspaceData, //workspace_fwd_size, fwd_algo_perf.memory, zero, top_desc, C)); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); LOG("cuDNN convolution with NCHW format Time: %f ms", total_time); CUDA_CHECK(cudaFree(workspaceData)); return total_time; } float cudnnConv(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float total_time = 
0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; cudnnHandle_t handle = 0; CUDNN_CHECK(cudnnCreate(&handle)); cudnnDataType_t float_type = CUDNN_DATA_FLOAT; // workspace size_t workspace_fwd_size = 0; void *workspaceData = NULL; // underlying storage // algorithms for forward convolutions cudnnConvolutionFwdAlgo_t fwd_algo; // descriptors cudnnTensorDescriptor_t bottom_desc, top_desc; cudnnFilterDescriptor_t filter_desc; cudnnConvolutionDescriptor_t conv_desc; // Create filter descriptor CUDNN_CHECK(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(filter_desc, float_type, CUDNN_TENSOR_NCHW, out_channels, in_channels, kernel_size, kernel_size)); // Create tensor descriptor for data and convolutions CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); // initializes the previously created generic Tensor descriptor object into a // 4D tensor CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(bottom_desc, float_type, batch_size, in_channels, in_height, in_width, in_channels * in_height * in_width, in_height * in_width, in_width, 1)); CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(top_desc, float_type, batch_size, out_channels, out_height, out_width, out_channels * out_height * out_width, out_height * out_width, out_width, 1)); CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1, CUDNN_CROSS_CORRELATION, float_type)); // CUDNN_CONVOLUTION, float_type)); // Now try to start the cuDNN process size_t workspace_limit_bytes, total_memory; CUDA_CHECK(cudaMemGetInfo(&workspace_limit_bytes, &total_memory)); int returned_algo_cnt = 0; cudnnConvolutionFwdAlgoPerf_t fwd_algo_perf; CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm_v7(handle, bottom_desc, filter_desc, conv_desc, top_desc, 1, &returned_algo_cnt, &fwd_algo_perf)); LOG("fwd_algo: %d", fwd_algo_perf.algo); // allocate workspace CUDA_CHECK(cudaMalloc((void **)&workspaceData, fwd_algo_perf.memory)); float oneval = 1.0, zeroval = 0.0; void *one = (void *)&oneval; void *zero = (void *)&zeroval; cudaDeviceSynchronize(); cudaEventRecord(start, 0); CUDNN_CHECK(cudnnConvolutionForward(handle, one, bottom_desc, A, filter_desc, B, conv_desc, fwd_algo_perf.algo, workspaceData, fwd_algo_perf.memory, zero, top_desc, C)); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); LOG("cuDNN convolution with NCHW format Time: %f ms", total_time); CUDA_CHECK(cudaFree(workspaceData)); return total_time; } float cudnnConv_algo(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad, int cudnn_algo) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float total_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; // cudnn_handle; cudnnHandle_t handle = 0; CUDNN_CHECK(cudnnCreate(&handle)); cudnnDataType_t float_type = CUDNN_DATA_FLOAT; // workspace size_t workspace_fwd_size = 0; void *workspaceData = NULL; // underlying storage // algorithms for forward convolutions cudnnConvolutionFwdAlgo_t fwd_algo; // descriptors cudnnTensorDescriptor_t bottom_desc, top_desc; cudnnFilterDescriptor_t filter_desc; cudnnConvolutionDescriptor_t conv_desc; // 
Create filter descriptor CUDNN_CHECK(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CHECK(cudnnSetFilter4dDescriptor(filter_desc, float_type, CUDNN_TENSOR_NCHW, out_channels, in_channels, kernel_size, kernel_size)); // Create tensor descriptor for data and convolutions CUDNN_CHECK(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CHECK(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&conv_desc)); // initializes the previously created generic Tensor descriptor object into a // 4D tensor CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(bottom_desc, float_type, batch_size, in_channels, in_height, in_width, in_channels * in_height * in_width, in_height * in_width, in_width, 1)); CUDNN_CHECK(cudnnSetTensor4dDescriptorEx(top_desc, float_type, batch_size, out_channels, out_height, out_width, out_channels * out_height * out_width, out_height * out_width, out_width, 1)); CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc, pad, pad, stride, stride, 1, 1, CUDNN_CROSS_CORRELATION, float_type // CUDNN_CONVOLUTION, float_type )); fwd_algo = (cudnnConvolutionFwdAlgo_t)cudnn_algo; LOG("fwd_algo: %d", fwd_algo); // get workspace size CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle, bottom_desc, filter_desc, conv_desc, top_desc, fwd_algo, &workspace_fwd_size)); // allocate workspace CUDA_CHECK(cudaMalloc((void **)&workspaceData, workspace_fwd_size)); float oneval = 1.0, zeroval = 0.0; void *one = (void *)&oneval; void *zero = (void *)&zeroval; cudaDeviceSynchronize(); cudaEventRecord(start, 0); CUDNN_CHECK(cudnnConvolutionForward(handle, one, bottom_desc, A, filter_desc, B, conv_desc, fwd_algo, workspaceData, workspace_fwd_size, zero, top_desc, C)); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); LOG("cuDNN convolution with NCHW format Time: %f ms", total_time); CUDA_CHECK(cudaFree(workspaceData)); return total_time; } float cublasConv(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { float total_time = 0.0; // struct timeval start, stop; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; // For each image in the batch, do convolution separately float *input = NULL; float *weights = B; float *output = NULL; float *unroll_buff = NULL; // column buffer float *unroll_matrix = NULL; size_t unroll_matrix_size = sizeof(float) * batch_size * in_channels * kernel_size * kernel_size * out_width * out_height; CUDA_CHECK(cudaMalloc((void **)&unroll_matrix, unroll_matrix_size)); unroll_buff = A; if (kernel_size > 1) { total_time += im2col_gpu_batch(A, in_channels, in_height, in_width, kernel_size, kernel_size, pad, pad, stride, stride, unroll_matrix, batch_size); unroll_buff = unroll_matrix; } // M = output_channels, N = output_h * output_w * batch_size // K = input_channels * kernel_size * kernel_size total_time += cublasGEMM(false, false, weights, unroll_buff, C, out_channels, out_height * out_width * batch_size, in_channels * kernel_size * kernel_size); LOG("cublasConv total Time: %f ms", total_time); CUDA_CHECK(cudaFree(unroll_matrix)); return total_time; } __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, 
float* data_col) { // output also store in input_channels * r * s * (output_h * output_w) format // sequentially pull out index CUDA_KERNEL_LOOP(index, n) { int w_out = index % width_col; // which output column int h_index = index / width_col; int h_out = h_index % height_col; // which output row int channel_in = h_index / height_col; // which channel int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_h; // the start of the input region int w_in = w_out * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : (float)(0); data_col_ptr += height_col * width_col; } } } } float im2col_gpu(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_col) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float total_time = 0.0; // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; cudaDeviceSynchronize(); cudaEventRecord(start, 0); // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<<<EXP_GET_BLOCKS(num_kernels), EXP_CUDA_NUM_THREADS>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); LOG("im2col_gpu total Time: %f ms", total_time); return total_time; } __global__ void batch_im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int batch, const int channels, const int height_col, const int width_col, float* data_col) { // output also store in input_channels * r * s * (output_h * output_w) format // sequentially pull out index CUDA_KERNEL_LOOP(id, n) { int index = id % (channels * height_col * width_col); int batch_id = id / (channels * height_col * width_col); int w_out = index % width_col; // which output column int h_index = index / width_col; int h_out = h_index % height_col; // which output row int channel_in = h_index / height_col; // which channel int channel_out = channel_in * kernel_h * kernel_w; int h_in = h_out * stride_h - pad_h; // the start of the input region int w_in = w_out * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += ((channel_out * batch + batch_id) * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += ((batch_id * channels + channel_in) * height + h_in) * width + w_in; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? 
data_im_ptr[i * width + j] : (float)(0); data_col_ptr += height_col * width_col * batch; } } } } float im2col_gpu_batch(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_col, const int batch) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float total_time = 0.0; // We are going to launch channels * height_col * width_col * batch kernels, // each kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; num_kernels *= batch; cudaDeviceSynchronize(); cudaEventRecord(start, 0); // NOLINT_NEXT_LINE(whitespace/operators) batch_im2col_gpu_kernel<<<EXP_GET_BLOCKS(num_kernels), EXP_CUDA_NUM_THREADS>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, batch, channels, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); LOG("im2col_gpu_batch total Time: %f ms", total_time); return total_time; } // GEMM with cuBLAS // A size M*K, B size K*N // if M == 1(which means A is a vector), falls into GEMV float cublasGEMM(bool TransA, bool TransB, float *A, float *B, float *C, int M, int N, int K) { cublasHandle_t handle = NULL; CUBLAS_CHECK(cublasCreate(&handle)); const float alpha = 1.0; const float beta = 0.0; int lda = (TransA == false) ? K : M; int ldb = (TransB == false) ? N : K; cublasOperation_t cuTransA = (TransA == false) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == false) ? CUBLAS_OP_N : CUBLAS_OP_T; // struct timeval start, stop; // Timer timer; float total_time; cudaDeviceSynchronize(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord(start, 0); if (M == 1) { LOG("USE cublasSgemv!"); cudaEventRecord(start, 0); // timer.start(); // wrong execution actually... CUBLAS_CHECK(cublasSgemv(handle, cuTransB, K, // number of rows of matrix A, not op(A)! 
N, // N, // K, &alpha, B, ldb, A, 1, &beta, C, 1 )); cudaDeviceSynchronize(); // timer.end(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } else { cudaEventRecord(start, 0); // timer.start(); // Note that cuBLAS use Fortran order (column-major) // But we use row-major, so we need to switch the A, B matrix CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N )); cudaDeviceSynchronize(); // timer.end(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } // total_time = timer.duration_ms(); // LOG("cuBLAS Time: %f ms", total_time); cudaEventElapsedTime(&total_time, start, stop); LOG("cuBLAS total Time by cudaEvent: %f ms", total_time); return total_time; } // Do convolution with cuSparse CSR format float csrConv(float *A, float *B, float *C, int batch_size, int in_channels, int in_height, int in_width, int out_channels, int kernel_size, int stride, int pad) { float total_time = 0.0; double im2col_time = 0.0, transpose_time = 0.0; double tmp_time = 0.0; int out_width = (in_width + 2 * pad - kernel_size) / stride + 1; int out_height = (in_height + 2 * pad - kernel_size) / stride + 1; // For each image in the batch, do convolution separately float *input = NULL; float *weights = B; float *unroll_buff = NULL; // column buffer float *unroll_matrix = NULL; size_t unroll_matrix_size = sizeof(float) * batch_size * in_channels * kernel_size * kernel_size * out_width * out_height; CUDA_CHECK(cudaMalloc((void **)&unroll_matrix, unroll_matrix_size)); unroll_buff = A; if (kernel_size > 1) { tmp_time = im2col_gpu_batch(A, in_channels, in_height, in_width, kernel_size, kernel_size, pad, pad, stride, stride, unroll_matrix, batch_size); unroll_buff = unroll_matrix; total_time += tmp_time; im2col_time += tmp_time; } // we need to manually transpose the output float *output_trans; CUDA_CHECK(cudaMalloc((void **)&output_trans, sizeof(float) * batch_size * out_channels * out_height * out_width)); int N = out_height * out_width * batch_size; int M = out_channels; int K = in_channels * kernel_size * kernel_size; total_time += csrGEMM2(true, true, unroll_buff, weights, output_trans, N, M, K); tmp_time = cublas_transpose(output_trans, C, out_height * out_width * batch_size, out_channels); transpose_time += tmp_time; LOG("csrConv im2col Time: %f, transpose time, %f, total Time: %f ms", im2col_time, transpose_time, total_time); CUDA_CHECK(cudaFree(unroll_matrix)); CUDA_CHECK(cudaFree(output_trans)); return total_time; } // GEMM with cuSparse CSR format for weights // cusparseScsrmm // Bt * At = (AB)t // Remember that we have to manually transpose B... // csrGEMM2 support At and Bt together float csrGEMM2(bool TransA, bool TransB, float *A, float *B, float *C, int M, int N, int K) { cusparseHandle_t handle = 0; CUSPARSE_CHECK(cusparseCreate(&handle)); cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); int nnz; float *csrVal; int *csrRowPtr; int *csrColIndex; // Note the different of row-major (host) and column-major (device)! 
int B_row_dev = N; int B_col_dev = K; if (TransB) { float *Bt; CUDA_CHECK(cudaMalloc((void**)&Bt, sizeof(float) * N * K)); cublas_transpose(B, Bt, N, K); // printf("Have transposed B...\n"); convert_dense2csr(K, N, Bt, (void **)&csrVal, (void **)&csrRowPtr, (void **)&csrColIndex, &nnz); CUDA_CHECK(cudaFree(Bt)); } else { convert_dense2csr(K, N, B, (void **)&csrVal, (void **)&csrRowPtr, (void **)&csrColIndex, &nnz); } const float alpha = 1.0; const float beta = 0.0; int lda = (TransA == false) ? K : M; int A_col_dev = (TransA == false) ? M : K; int ldc = N; // int ldb = (TransB == false) ? N : K; cusparseOperation_t cuTransA = (TransA == false) ? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE; cusparseOperation_t cuTransB = CUSPARSE_OPERATION_NON_TRANSPOSE; float total_time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaDeviceSynchronize(); // struct timeval start, stop; // cudaDeviceSynchronize(); if (M == 1) { LOG("USE csrSgemv!"); // gettimeofday(&start, NULL); cudaEventRecord(start, 0); // wrong execution actually.... CUSPARSE_CHECK(cusparseScsrmv(handle, cuTransB, B_row_dev, B_col_dev, nnz, &alpha, descr, csrVal, csrRowPtr, csrColIndex, A, &beta, C)); // cudaDeviceSynchronize(); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // cudaEventElapsedTime(&total_time, start, stop); // gettimeofday(&stop, NULL); } else { // gettimeofday(&start, NULL); cudaEventRecord(start, 0); // Note that cuBLAS use Fortran order (column-major) // But we use row-major, so we need to switch the A, B matrix CUSPARSE_CHECK(cusparseScsrmm2(handle, cuTransB, cuTransA, B_row_dev, M, B_col_dev, nnz, &alpha, descr, csrVal, csrRowPtr, csrColIndex, A, lda, &beta, C, ldc)); // cudaDeviceSynchronize(); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // gettimeofday(&stop, NULL); } cudaEventElapsedTime(&total_time, start, stop); // // total_time = (stop.tv_sec - start.tv_sec) * 1000.0 + // (stop.tv_usec - start.tv_usec) / 1000.0; LOG("cuSparse csrMM2 Time: %f ms", total_time); CUDA_CHECK(cudaFree(csrVal)); CUDA_CHECK(cudaFree(csrRowPtr)); CUDA_CHECK(cudaFree(csrColIndex)); return total_time; } // transpose a matrix // in_M and in_N are the input M, N float cublas_transpose(const float *input, float *output, int in_M, int in_N) { const float alpha = 1.0; const float beta = 0.0; cudaEvent_t start, stop; float total_time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaDeviceSynchronize(); cudaEventRecord(start, 0); // cublas use column major // before trans: M * N (row-major on Host) / N * M (column-majore) // after trans: N * M on Host / M * N on device cublasHandle_t handle = NULL; CUBLAS_CHECK(cublasCreate(&handle)); CUBLAS_CHECK(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, in_M, in_N, &alpha, input, in_N, &beta, input, in_N, output, in_M )); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&total_time, start, stop); return total_time; } // convert a matrix A into CSR format // M, N are rows and columns on Host row-major! 
// as a M*N row-major matrix is equivelent to a N*M column-major matrix, // and the row-major matrix in CSR format is the same to the column-major matrix // in CSC format, // we treat the input M*N row-major matrix as N*M column-major matrix, // use cusparse to compute this columnn-major matrix into CSC, and return it // as a row-major CSR void convert_dense2csr(const int M, const int N, const float *A, void **csrVal, void **csrRowPtr, void **csrColIndex, int *nnz) { cusparseHandle_t handle = 0; CUSPARSE_CHECK(cusparseCreate(&handle)); cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); // Note that cusparse follows Fortran order (column-major) // So rows = N, columns = M int *nnzPerRowColumn; int lda = N; CUDA_CHECK(cudaMalloc((void**)&nnzPerRowColumn, sizeof(int) * M)); cudaDeviceSynchronize(); CUSPARSE_CHECK(cusparseSnnz(handle, CUSPARSE_DIRECTION_COLUMN, N, M, descr, A, lda, nnzPerRowColumn, nnz)); cudaDeviceSynchronize(); CUDA_CHECK(cudaMalloc(csrVal, sizeof(float) * (*nnz))); CUDA_CHECK(cudaMalloc(csrRowPtr, sizeof(int) * (M + 1))); CUDA_CHECK(cudaMalloc(csrColIndex, sizeof(int) * (*nnz))); // convert to CSC format with column major, which is equivelant to CSR format // with row major CUSPARSE_CHECK(cusparseSdense2csc(handle, N, M, descr, A, lda, nnzPerRowColumn, (float*)*csrVal, (int*)*csrColIndex, (int*)*csrRowPtr)); cudaDeviceSynchronize(); CUDA_CHECK(cudaFree(nnzPerRowColumn)); return; } float host_cublasGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { float *d_A = NULL, *d_x = NULL, *d_y = NULL; int size_A = M * N; int size_x = (TransA == false) ? N : M; int size_y = (TransA == false) ? M : N; CUDA_CHECK(cudaMalloc( (void**)&d_A, size_A * sizeof(d_A[0]) )); CUDA_CHECK(cudaMalloc( (void**)&d_x, size_x * sizeof(d_x[0]) )); CUDA_CHECK(cudaMalloc( (void**)&d_y, size_y * sizeof(d_y[0]) )); CUDA_CHECK(cudaMemcpy( d_A, A, (size_t)(size_A * sizeof(d_A[0])), cudaMemcpyHostToDevice )); CUDA_CHECK(cudaMemcpy( d_x, x, (size_t)(size_x * sizeof(d_x[0])), cudaMemcpyHostToDevice )); CUDA_CHECK(cudaMemcpy( d_y, y, (size_t)(size_y * sizeof(d_y[0])), cudaMemcpyHostToDevice )); float time = cublasGEMV(TransA, d_A, d_x, d_y, M, N); CUDA_CHECK(cudaMemcpy( y, d_y, (size_t)(size_y * sizeof(d_y[0])), cudaMemcpyDeviceToHost )); CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_x)); CUDA_CHECK(cudaFree(d_y)); return time; } float host_csrGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { float *d_A = NULL, *d_x = NULL, *d_y = NULL; int size_A = M * N; int size_x = (TransA == false) ? N : M; int size_y = (TransA == false) ? 
M : N; CUDA_CHECK(cudaMalloc( (void**)&d_A, size_A * sizeof(d_A[0]) )); CUDA_CHECK(cudaMalloc( (void**)&d_x, size_x * sizeof(d_x[0]) )); CUDA_CHECK(cudaMalloc( (void**)&d_y, size_y * sizeof(d_y[0]) )); CUDA_CHECK(cudaMemcpy( d_A, A, (size_t)(size_A * sizeof(d_A[0])), cudaMemcpyHostToDevice )); CUDA_CHECK(cudaMemcpy( d_x, x, (size_t)(size_x * sizeof(d_x[0])), cudaMemcpyHostToDevice )); CUDA_CHECK(cudaMemcpy( d_y, y, (size_t)(size_y * sizeof(d_y[0])), cudaMemcpyHostToDevice )); float time = csrGEMV(TransA, d_A, d_x, d_y, M, N); CUDA_CHECK(cudaMemcpy( y, d_y, (size_t)(size_y * sizeof(d_y[0])), cudaMemcpyDeviceToHost )); CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_x)); CUDA_CHECK(cudaFree(d_y)); return time; } // GEMV with cuBLAS // A size M*N, x size N*1, y size M*1 float cublasGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { cublasHandle_t handle = NULL; CUBLAS_CHECK(cublasCreate(&handle)); const float alpha = 1.0; const float beta = 0.0; // int lda = (TransA == false) ? M : N; cublasOperation_t cuTransA = (TransA == false) ? CUBLAS_OP_N : CUBLAS_OP_T; // int row = (TransA == false) ? M : N; // int col = (TransA == false) ? N : M; // struct timeval start, stop; // Timer timer; float total_time; cudaDeviceSynchronize(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord(start, 0); cudaEventRecord(start, 0); // timer.start(); // wrong execution actually... CUBLAS_CHECK(cublasSgemv(handle, cuTransA, // row, // col, M, N, &alpha, A, // lda, M, x, 1, &beta, y, 1 )); cudaDeviceSynchronize(); // timer.end(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CUDA_POST_KERNEL_CHECK; // total_time = timer.duration_ms(); // LOG("cuBLAS Time: %f ms", total_time); cudaEventElapsedTime(&total_time, start, stop); LOG("cuBLAS GEMV total Time by cudaEvent: %f ms", total_time); return total_time; } // GEMV with cuSparse CSR format for matrix float csrGEMV(bool TransA, float *A, float *x, float *y, int M, int N) { cusparseHandle_t handle = 0; CUSPARSE_CHECK(cusparseCreate(&handle)); cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); int nnz; float *csrVal; int *csrRowPtr; int *csrColIndex; // Note the different of row-major (host) and column-major (device)! int A_row_dev = M; int A_col_dev = N; convert_dense2csr(M, N, A, (void **)&csrVal, (void **)&csrRowPtr, (void **)&csrColIndex, &nnz); const float alpha = 1.0; const float beta = 0.0; cusparseOperation_t cuTransA = (TransA == false) ? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE; float total_time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaDeviceSynchronize(); cudaEventRecord(start, 0); CUSPARSE_CHECK(cusparseScsrmv(handle, cuTransA, A_row_dev, A_col_dev, nnz, &alpha, descr, csrVal, csrRowPtr, csrColIndex, x, &beta, y)); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CUDA_POST_KERNEL_CHECK; cudaEventElapsedTime(&total_time, start, stop); LOG("cuSparse csrMV Time: %f ms", total_time); CUDA_CHECK(cudaFree(csrVal)); CUDA_CHECK(cudaFree(csrRowPtr)); CUDA_CHECK(cudaFree(csrColIndex)); return total_time; }
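
The cublasGEMM wrapper in the file above leans on the identity (A·B)^T = B^T·A^T: cuBLAS assumes column-major storage, so a row-major C = A·B is obtained by handing cuBLAS the same buffers in the order B, A with the M/N dimensions swapped, exactly as its comment about Fortran order says. The short stand-alone sketch below restates only that call pattern with tiny fixed sizes and no error checking; none of the names or sizes in it come from the file above.

// Row-major C = A*B through column-major cuBLAS: issue the call as "B*A" with
// swapped dimensions, which is C^T in cuBLAS terms and therefore C in row-major memory.
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int M = 2, K = 3, N = 2;            // C is M x N, row-major
    float A[M * K] = {1, 2, 3, 4, 5, 6};      // 2 x 3
    float B[K * N] = {1, 0, 0, 1, 1, 1};      // 3 x 2
    float C[M * N] = {0};

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(A));
    cudaMalloc(&dB, sizeof(B));
    cudaMalloc(&dC, sizeof(C));
    cudaMemcpy(dA, A, sizeof(A), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(B), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // Column-major view: compute C^T(N x M) = B^T(N x K) * A^T(K x M).
    // Passing the row-major buffers untouched gives exactly those transposes.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K,
                &alpha,
                dB, N,   // first operand  = B, leading dimension N
                dA, K,   // second operand = A, leading dimension K
                &beta,
                dC, N);  // result has leading dimension N

    cudaMemcpy(C, dC, sizeof(C), cudaMemcpyDeviceToHost);
    for (int i = 0; i < M; ++i)
        printf("%6.1f %6.1f\n", C[i * N + 0], C[i * N + 1]);   // prints 4 5 / 10 11

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}

With the 2x3 and 3x2 inputs shown, the printed matrix is the row-major product [[4, 5], [10, 11]] without any explicit transpose step, which is the same trick the wrapper applies at full scale.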
ba9f66a0433514d313f12541566d31b8dcf009ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu //#include "kernel.hip" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 #define block_limit 1024 #define GPU_COWORKER 1 graph * mygraph; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x 
+blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ mid_offset++; } else { //could be more then 2 catigories large_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } else { //could be more then 2 catigories dest_head[large_offset] = head; dest_adj [large_offset] = adj; large_offset++; } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- 
//------------------------------------------------------------------ void graph::scan(){ hipSetDevice(4); vertex_t* dev_adj; vertex_t* dev_head; index_t* dev_begin; index_t* dev_count; H_ERR(hipMalloc(&dev_adj, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&dev_head, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) ); vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; H_ERR(hipMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&src_head, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&src_adj, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMemcpy(src_adj, upperAdj, upperEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(src_head, upperHead, upperEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, upperBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) ); dev_adj = src_adj; dev_head= src_head; // H_ERR(hipMemcpy(src_degree, degree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMalloc(&classified_head, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&classified_adj, upperEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( classify_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, upperEdgeCount, small_num, mid_num, large_num ); H_ERR(hipDeviceSynchronize() ); //test for prefix sum hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); index_t N1,N2,N3; H_ERR(hipMemcpy(&N1 , &small_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N3 , &large_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; hipLaunchKernelGGL(( collect_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, upperEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); 
H_ERR(hipDeviceSynchronize() ); double time2=wtime(); hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, 0, N1, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( block_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, //dev_head, //dev_adj, dev_adj, // dev_degree, dev_begin, N1, N1+N2, // upperEdgeCount, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count); H_ERR(hipDeviceSynchronize() ); H_ERR(hipMemcpy(&count[0], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); double time4 = wtime(); cout<<"total count = "<<count[0]<<endl; cout<<"GPU time = "<<time4-time2<<" seconds"<<endl; H_ERR(hipFree(small_num) ); H_ERR(hipFree(large_num) ); H_ERR(hipFree(classified_head) ); H_ERR(hipFree(classified_adj) ); H_ERR(hipFree(src_head) ); H_ERR(hipFree(src_adj) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(block_offset) ); H_ERR(hipFree(dev_count) ); }
ba9f66a0433514d313f12541566d31b8dcf009ea.cu
//scan.cu //#include "kernel.cu" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 #define block_limit 1024 #define GPU_COWORKER 1 graph * mygraph; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd; int i = threadIdx.x% max_thd; index_t mycount=0; // __shared__ vertex_t cache[256]; __shared__ index_t local[max_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/max_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = max_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + max_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/max_thd; top = top*m/max_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += max_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[max_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; 
//start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ mid_offset++; } else { //could be more then 2 catigories large_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit && n>0){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else if(n>0){ //could be more then 2 catigories // else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } else { //could be more then 2 catigories dest_head[large_offset] = head; dest_adj [large_offset] = adj; large_offset++; } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void graph::scan(){ cudaSetDevice(4); vertex_t* dev_adj; vertex_t* dev_head; 
index_t* dev_begin; index_t* dev_count; H_ERR(cudaMalloc(&dev_adj, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&dev_head, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) ); vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; H_ERR(cudaMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&src_head, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&src_adj, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMemcpy(src_adj, upperAdj, upperEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(src_head, upperHead, upperEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, upperBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) ); dev_adj = src_adj; dev_head= src_head; // H_ERR(cudaMemcpy(src_degree, degree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMalloc(&classified_head, upperEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&classified_adj, upperEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(cudaDeviceSynchronize() ); classify_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, upperEdgeCount, small_num, mid_num, large_num ); H_ERR(cudaDeviceSynchronize() ); //test for prefix sum prefix_kernel_1 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_1 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_1 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); index_t N1,N2,N3; H_ERR(cudaMemcpy(&N1 , &small_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N3 , &large_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; collect_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, upperEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(cudaDeviceSynchronize() ); double time2=wtime(); warp_binary_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, 0, N1, dev_count ); H_ERR(cudaDeviceSynchronize() ); block_binary_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, //dev_head, //dev_adj, dev_adj, // dev_degree, dev_begin, N1, N1+N2, // upperEdgeCount, dev_count ); H_ERR(cudaDeviceSynchronize() ); reduce_kernel2 <<<1,1>>>(dev_count); H_ERR(cudaDeviceSynchronize() ); 
H_ERR(cudaMemcpy(&count[0], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); double time4 = wtime(); cout<<"total count = "<<count[0]<<endl; cout<<"GPU time = "<<time4-time2<<" seconds"<<endl; H_ERR(cudaFree(small_num) ); H_ERR(cudaFree(large_num) ); H_ERR(cudaFree(classified_head) ); H_ERR(cudaFree(classified_adj) ); H_ERR(cudaFree(src_head) ); H_ERR(cudaFree(src_adj) ); H_ERR(cudaFree(dev_begin) ); H_ERR(cudaFree(block_offset) ); H_ERR(cudaFree(dev_count) ); }
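
The prefix_kernel_1/2/3 trio in both versions of the file above implements a scan-then-propagate prefix sum: an inclusive scan inside each block, an exclusive scan of the per-block totals, then each block adds its offset back in so the classified edges get contiguous write positions. The toy program below restates only that pattern under assumed tiny sizes (4 blocks of 256 elements, input all ones) and keeps the same naive per-thread summation loop; it is an illustrative sketch, not the dataset's kernels.

#include <cstdio>
#include <cuda_runtime.h>

#define THREADS 256
#define BLOCKS 4

__global__ void scan_in_block(int *data, int *block_total) {
    __shared__ int buf[THREADS];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    buf[threadIdx.x] = data[tid];
    __syncthreads();
    int val = 0;
    for (int i = 0; i <= threadIdx.x; ++i) val += buf[i];     // inclusive scan
    data[tid] = val;
    if (threadIdx.x == THREADS - 1) block_total[blockIdx.x] = val;
}

__global__ void scan_block_totals(int *block_total) {         // launched with one block
    __shared__ int buf[BLOCKS];
    if (threadIdx.x < BLOCKS) buf[threadIdx.x] = block_total[threadIdx.x];
    __syncthreads();
    if (threadIdx.x < BLOCKS) {
        int val = 0;
        for (int i = 0; i < threadIdx.x; ++i) val += buf[i];  // exclusive scan of totals
        block_total[threadIdx.x] = val;
    }
}

__global__ void add_block_offset(int *data, const int *block_total) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    data[tid] += block_total[blockIdx.x];
}

int main() {
    const int n = THREADS * BLOCKS;
    int *d_data, *d_total, h_last;
    cudaMalloc(&d_data, n * sizeof(int));
    cudaMalloc(&d_total, BLOCKS * sizeof(int));
    int *ones = new int[n];
    for (int i = 0; i < n; ++i) ones[i] = 1;   // all ones: prefix of element i is i+1
    cudaMemcpy(d_data, ones, n * sizeof(int), cudaMemcpyHostToDevice);

    scan_in_block<<<BLOCKS, THREADS>>>(d_data, d_total);
    scan_block_totals<<<1, THREADS>>>(d_total);
    add_block_offset<<<BLOCKS, THREADS>>>(d_data, d_total);

    cudaMemcpy(&h_last, d_data + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("last prefix value = %d (expected %d)\n", h_last, n);
    cudaFree(d_data); cudaFree(d_total); delete[] ones;
    return 0;
}

Filling the input with ones makes the expected inclusive-scan value of the last element equal to the element count, which gives a one-line correctness check on the three-kernel pipeline.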
094e2d15ce35590c7f777610dd5d7911077a308a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdint.h>
#include <include/cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/select_impl.cuh"

template <typename T>
__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
    output[pos] = cond[pos] ? input_x[pos] : input_y[pos];
  }
  return;
}

template <typename T>
void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output,
               hipStream_t cuda_stream) {
  hipLaunchKernelGGL(( Select), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
                     size, cond, input_x, input_y, output);
  return;
}

template void CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
                               float* output, hipStream_t cuda_stream);
template void CalSelect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y,
                             int* output, hipStream_t cuda_stream);
template void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
                              half* output, hipStream_t cuda_stream);
094e2d15ce35590c7f777610dd5d7911077a308a.cu
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdint.h>
#include <include/cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/select_impl.cuh"

template <typename T>
__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
    output[pos] = cond[pos] ? input_x[pos] : input_y[pos];
  }
  return;
}

template <typename T>
void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output,
               cudaStream_t cuda_stream) {
  Select<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, cond, input_x, input_y, output);
  return;
}

template void CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
                               float* output, cudaStream_t cuda_stream);
template void CalSelect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y,
                             int* output, cudaStream_t cuda_stream);
template void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
                              half* output, cudaStream_t cuda_stream);
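
Both versions of the Select kernel above use a grid-stride loop, so correctness does not depend on the exact values GET_BLOCKS and GET_THREADS expand to; those macros come from a header that is not part of this row. The stand-alone driver below is a sketch under that assumption: it re-declares a non-templated float variant of the kernel locally and hand-rolls a 256-threads-per-block launch shape in place of the macros.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void SelectFloat(size_t size, const bool *cond,
                            const float *x, const float *y, float *out) {
  // Grid-stride loop: any grid size covers the whole array.
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
       pos += blockDim.x * gridDim.x) {
    out[pos] = cond[pos] ? x[pos] : y[pos];
  }
}

int main() {
  const size_t n = 1 << 20;
  bool *h_cond = new bool[n];
  float *h_x = new float[n], *h_y = new float[n], *h_out = new float[n];
  for (size_t i = 0; i < n; ++i) { h_cond[i] = (i % 2 == 0); h_x[i] = 1.0f; h_y[i] = 2.0f; }

  bool *d_cond; float *d_x, *d_y, *d_out;
  cudaMalloc(&d_cond, n * sizeof(bool));
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMalloc(&d_y, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_cond, h_cond, n * sizeof(bool), cudaMemcpyHostToDevice);
  cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, h_y, n * sizeof(float), cudaMemcpyHostToDevice);

  const int threads = 256;                                // stand-in for GET_THREADS
  const int blocks = (int)((n + threads - 1) / threads);  // stand-in for GET_BLOCKS(n)
  SelectFloat<<<blocks, threads>>>(n, d_cond, d_x, d_y, d_out);

  cudaMemcpy(h_out, d_out, 4 * sizeof(float), cudaMemcpyDeviceToHost);
  printf("%g %g %g %g\n", h_out[0], h_out[1], h_out[2], h_out[3]);  // expected: 1 2 1 2

  cudaFree(d_cond); cudaFree(d_x); cudaFree(d_y); cudaFree(d_out);
  delete[] h_cond; delete[] h_x; delete[] h_y; delete[] h_out;
  return 0;
}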
c537d2719adada28e160062fcda9d17770a64017.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #include <stdio.h> #include<iostream> #include<cstdlib> #include<chrono> #include<assert.h> using namespace std; //izracun __global__ void multiply(int* a, int* b, int* c, int n) { //izracun retka i stupca za svaki thread int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //provjera granica if ((row < n) && (col < n)) { int tmp = 0; for (int i = 0; i < n; i++) { tmp += a[row * n + i] * b[i * n + col]; } c[row * n + col] = tmp; } } //izvodenje na procesoru void runOnHost(int* a, int* b, int* c, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { for (int k = 0; k < n; k++) { c[i * n + j] += a[i * n + k] * b[k * n + j]; } } } } //provjera rezultata void verify(int* a, int* b, int* c, int n) { cout << "Provjera rezultata: \n"; int tmp; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { tmp = 0; for (int k = 0; k < n; k++) { tmp += a[i * n + k] * b[k * n + j]; } if (c[i * n + j] != tmp) cout << "nije dobro" << endl; } } } void init_matrices(int* m, int n) { for (int i = 0; i < n * n; i++) { m[i] = rand() % 10; } } int main() { int N = 1 << 10; //dimenzije matrica, 1024x1024, GPU postaje bri na N = 1 << 6 size_t bytes = N * N * sizeof(int); //host pointeri int* h_a, * h_b, * h_c, * h_c_cpu;//rezultantna matrica za izracun na hostu //device pointeri int* d_a, * d_b, * d_c; //alokacija host memorije h_a = (int*)malloc(bytes); h_b = (int*)malloc(bytes); h_c = (int*)malloc(bytes); h_c_cpu = (int*)malloc(bytes); //alokacija device memorije hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); //inicijalizacija matrica init_matrices(h_a, N); init_matrices(h_b, N); //kopiranje matrica na GPU hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); int BLOCK_SIZE = 16; int GRID_SIZE = (int)ceil(N / BLOCK_SIZE); dim3 grid(GRID_SIZE, GRID_SIZE); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); //pocetak mjerenja za GPU chrono::steady_clock::time_point begin = chrono::steady_clock::now(); //launch kernel multiply << <grid, threads >> > (d_a, d_b, d_c, N); hipDeviceSynchronize(); //kopiraj rezultat sa GPU-a na host hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); //zavrsetak mjerenja za GPU chrono::steady_clock::time_point end = chrono::steady_clock::now(); cout << "Vrijeme za izracun na GPU = " << chrono::duration_cast<std::chrono::milliseconds>(end - begin).count() << " mikrosekundi" << endl; //pocetak mjerenja za CPU chrono::steady_clock::time_point beginCPU = chrono::steady_clock::now(); //izracun na CPU runOnHost(h_a, h_b, h_c_cpu, N); //kraj mjerenja za CPU chrono::steady_clock::time_point endCPU = chrono::steady_clock::now(); cout << "Vrijeme za izracun na CPU = " << chrono::duration_cast<std::chrono::milliseconds>(endCPU - beginCPU).count() << " mikrosekundi" << endl; verify(h_a, h_b, h_c, N); cout <<"\neverything works" << endl; //ispis dobivene matrice /*for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { cout << h_c[i * N + j] << " "; } cout << endl; }*/ /*for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { cout << h_c_cpu[i * N + j] << " "; } cout << endl; }*/ //oslobadanje memorije na hostu free(h_a); free(h_b); free(h_c); //oslobadanje memorije na GPU hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
c537d2719adada28e160062fcda9d17770a64017.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #include <stdio.h> #include<iostream> #include<cstdlib> #include<chrono> #include<assert.h> using namespace std; //izracun __global__ void multiply(int* a, int* b, int* c, int n) { //izracun retka i stupca za svaki thread int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //provjera granica if ((row < n) && (col < n)) { int tmp = 0; for (int i = 0; i < n; i++) { tmp += a[row * n + i] * b[i * n + col]; } c[row * n + col] = tmp; } } //izvodenje na procesoru void runOnHost(int* a, int* b, int* c, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { for (int k = 0; k < n; k++) { c[i * n + j] += a[i * n + k] * b[k * n + j]; } } } } //provjera rezultata void verify(int* a, int* b, int* c, int n) { cout << "Provjera rezultata: \n"; int tmp; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { tmp = 0; for (int k = 0; k < n; k++) { tmp += a[i * n + k] * b[k * n + j]; } if (c[i * n + j] != tmp) cout << "nije dobro" << endl; } } } void init_matrices(int* m, int n) { for (int i = 0; i < n * n; i++) { m[i] = rand() % 10; } } int main() { int N = 1 << 10; //dimenzije matrica, 1024x1024, GPU postaje brži na N = 1 << 6 size_t bytes = N * N * sizeof(int); //host pointeri int* h_a, * h_b, * h_c, * h_c_cpu;//rezultantna matrica za izracun na hostu //device pointeri int* d_a, * d_b, * d_c; //alokacija host memorije h_a = (int*)malloc(bytes); h_b = (int*)malloc(bytes); h_c = (int*)malloc(bytes); h_c_cpu = (int*)malloc(bytes); //alokacija device memorije cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); //inicijalizacija matrica init_matrices(h_a, N); init_matrices(h_b, N); //kopiranje matrica na GPU cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); int BLOCK_SIZE = 16; int GRID_SIZE = (int)ceil(N / BLOCK_SIZE); dim3 grid(GRID_SIZE, GRID_SIZE); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); //pocetak mjerenja za GPU chrono::steady_clock::time_point begin = chrono::steady_clock::now(); //launch kernel multiply << <grid, threads >> > (d_a, d_b, d_c, N); cudaDeviceSynchronize(); //kopiraj rezultat sa GPU-a na host cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost); //zavrsetak mjerenja za GPU chrono::steady_clock::time_point end = chrono::steady_clock::now(); cout << "Vrijeme za izracun na GPU = " << chrono::duration_cast<std::chrono::milliseconds>(end - begin).count() << " mikrosekundi" << endl; //pocetak mjerenja za CPU chrono::steady_clock::time_point beginCPU = chrono::steady_clock::now(); //izracun na CPU runOnHost(h_a, h_b, h_c_cpu, N); //kraj mjerenja za CPU chrono::steady_clock::time_point endCPU = chrono::steady_clock::now(); cout << "Vrijeme za izracun na CPU = " << chrono::duration_cast<std::chrono::milliseconds>(endCPU - beginCPU).count() << " mikrosekundi" << endl; verify(h_a, h_b, h_c, N); cout <<"\neverything works" << endl; //ispis dobivene matrice /*for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { cout << h_c[i * N + j] << " "; } cout << endl; }*/ /*for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { cout << h_c_cpu[i * N + j] << " "; } cout << endl; }*/ //oslobadanje memorije na hostu free(h_a); free(h_b); free(h_c); //oslobadanje memorije na GPU cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
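
One detail in the pair above that is easy to trip over: GRID_SIZE is computed as (int)ceil(N / BLOCK_SIZE), but N / BLOCK_SIZE is already integer division, so the quotient is truncated before ceil ever sees it; the launch is only complete here because 1024 is an exact multiple of 16. The few lines below sketch the usual integer round-up instead; the noop kernel and the sizes are placeholders, not taken from the file.

// N / BLOCK_SIZE truncates first, so ceil() cannot recover the lost remainder.
// (N + BLOCK_SIZE - 1) / BLOCK_SIZE rounds up in pure integer arithmetic.
#include <cuda_runtime.h>

__global__ void noop() {}

int main() {
  const int N = 1000, BLOCK_SIZE = 16;                       // N deliberately not a multiple of 16
  const int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;   // 63, which covers all 1000 rows/cols
  dim3 grid(GRID_SIZE, GRID_SIZE), threads(BLOCK_SIZE, BLOCK_SIZE);
  noop<<<grid, threads>>>();                                 // 63x63 blocks of 16x16 threads
  return cudaDeviceSynchronize() == cudaSuccess ? 0 : 1;
}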
a3d0cf8aa7cc05262404dd115e3f4bd9cb9cdec9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cstdlib> #include <hiprand/hiprand.h> #include <assert.h> #include <unistd.h> #include <rocblas.h> #include <iostream> #include <complex.h> #include <math.h> #include <hip/hip_complex.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "cublas_beamformer.h" using namespace std; void beamform(); __global__ void transpose(signed char* data, signed char* tra_data); __global__ void data_restructure(signed char * data, hipComplex * data_restruc); __global__ void sti_reduction(hipComplex * data_in, float * data_out); // Struct defintion for beamformer metadata typedef struct bf_metadata_struct { float offsets[14]; char cal_filename[65]; char algorithm[65]; char weight_filename[65]; long long unsigned int xid; } bf_metadata; static bf_metadata my_metadata; static hipComplex * d_weights = NULL; void update_weights(char * filename){ printf("RTBF: In update_weights()...\n"); char weight_filename[128]; strcpy(weight_filename, filename); FILE * weights; float * bf_weights; float complex * weights_dc; float complex * weights_dc_n; // Allocate heap memory for file data //bf_weights = (float *)malloc(2*BN_WEIGHTS*sizeof(float)); //weights_dc = (float complex *)malloc(BN_WEIGHTS*sizeof(float complex *)); //weights_dc_n = (float complex *)malloc(BN_WEIGHTS*sizeof(float complex *)); // For pinned memory //////////////////////////////////////////////////// // Weights doesn't need pinned memory implemented because it is less than 16 MB /////////// hipHostMalloc((void **)&bf_weights,2*BN_WEIGHTS*sizeof(float complex *)); hipHostMalloc((void **)&weights_dc,BN_WEIGHTS*sizeof(float complex *)); hipHostMalloc((void **)&weights_dc_n,BN_WEIGHTS*sizeof(float complex *)); ///////////////////////////////////////////////////////////////////////// // open weight file weights = fopen(weight_filename, "r"); int j; if (weights != NULL) { fread(bf_weights, sizeof(float), 2*BN_WEIGHTS, weights); fread(my_metadata.offsets, sizeof(float), 14, weights); fread(my_metadata.cal_filename, sizeof(char), 64, weights); fread(my_metadata.algorithm, sizeof(char), 64, weights); fread(&(my_metadata.xid), sizeof(long long unsigned int), 1, weights); my_metadata.cal_filename[64] = '\0'; my_metadata.algorithm[64] = '\0'; // Extract all path information from weight_filename for metadata char * short_filename = strrchr(weight_filename, '/'); if (short_filename != NULL) { strcpy(my_metadata.weight_filename, short_filename+1); } else { strcpy(my_metadata.weight_filename, weight_filename); } // Convert to complex numbers (do a conjugate at the same time) for(j = 0; j < BN_WEIGHTS; j++){ weights_dc_n[j] = bf_weights[2*j] - bf_weights[(2*j)+1]*I; } // Transpose the weights int m,n; float complex transpose[BN_BEAM][BN_ELE_BLOC*BN_BIN]; for(m=0;m<BN_BEAM;m++){ for(n=0;n<BN_ELE_BLOC*BN_BIN;n++){ transpose[m][n] = weights_dc_n[m*BN_ELE_BLOC*BN_BIN + n]; } } for(n=0;n<BN_ELE_BLOC*BN_BIN;n++){ for(m=0;m<BN_BEAM;m++){ weights_dc[n*BN_BEAM+ m] = transpose[m][n]; } } fclose(weights); } // Copy weights to device hipMemcpy(d_weights, weights_dc, BN_WEIGHTS*sizeof(hipComplex), hipMemcpyHostToDevice); //r_weights instead of weights_dc //*BN_TIME // free memory //free(weights_dc); //free(weights_dc_n); //free(bf_weights); // Free pinned memory //////////////////////////////////////////////////// hipHostFree(weights_dc); hipHostFree(weights_dc_n); hipHostFree(bf_weights); 
///////////////////////////////////////////////////////////////////////// return; } void bf_get_offsets(float * offsets){ for(int i = 0; i<BN_BEAM; i++){ offsets[i] = my_metadata.offsets[i]; } } void bf_get_cal_filename(char * cal_filename){ for(int i = 0; i< 65; i++){ cal_filename[i] = my_metadata.cal_filename[i]; } } void bf_get_algorithm(char * algorithm){ for(int i = 0; i< 65; i++){ algorithm[i] = my_metadata.algorithm[i]; } } void bf_get_weight_filename(char * weight_filename){ int num_chars = strlen(my_metadata.weight_filename); for (int i = 0; i < num_chars; i++) { weight_filename[i] = my_metadata.weight_filename[i]; } for (int i = num_chars; i < 64; i++) { weight_filename[i] = ' '; } weight_filename[64] = '\0'; } long long unsigned int bf_get_xid(){ return my_metadata.xid; } static hipComplex * d_beamformed = NULL; static hipComplex * d_data = NULL; static signed char * d_data1 = NULL; // Device memory for input data static signed char * d_data2 = NULL; static float * d_outputs; static hipblasHandle_t handle; static hipComplex **d_arr_A = NULL; static hipComplex **d_arr_B = NULL; static hipComplex **d_arr_C = NULL; void init_beamformer(){ // Allocate memory for the weights, data, beamformer output, and sti output. hipMalloc((void **)&d_weights, BN_WEIGHTS*sizeof(hipComplex)); //*BN_TIME hipMalloc((void **)&d_data1, 2*BN_SAMP*sizeof(signed char)); //hipMalloc((void **)&d_data2, 2*BN_SAMP*sizeof(signed char)); hipMalloc((void **)&d_data, BN_SAMP*sizeof(hipComplex)); hipError_t err_malloc = hipMalloc((void **)&d_beamformed, BN_TBF*sizeof(hipComplex)); if (err_malloc != hipSuccess) { printf("CUDA Error (cudaMalloc2): %s\n", hipGetErrorString(err_malloc)); } hipMalloc((void **)&d_outputs, BN_POL*(BN_OUTPUTS*sizeof(float)/2)); /********************************************************** * Create a handle for CUBLAS **********************************************************/ hipblasCreate(&handle); hipError_t cudaStat; int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; nr_rows_A = BN_BEAM; nr_cols_A = BN_ELE_BLOC; nr_rows_B = BN_ELE_BLOC; nr_cols_B = BN_TIME; nr_rows_C = BN_BEAM; nr_cols_C = BN_TIME; // Allocate memory to host arrays - This is all memory allocated to arrays that are used by gemmBatched. Allocate 3 arrays on CPU const hipComplex **h_arr_A = 0; const hipComplex **h_arr_B = 0; hipComplex **h_arr_C = 0; //h_arr_A = (const hipComplex **)malloc(nr_rows_A * nr_cols_A *BN_BIN*sizeof(const hipComplex*)); //h_arr_B = (const hipComplex **)malloc(nr_rows_B * nr_cols_B *BN_BIN*sizeof(const hipComplex*)); //h_arr_C = (hipComplex **)malloc(nr_rows_C * nr_cols_C *BN_BIN*sizeof(hipComplex*)); ///////////////////////////////////////////////////////////////////////// hipHostMalloc((void **)&h_arr_A, nr_rows_A * nr_cols_A *BN_BIN*sizeof(const hipComplex*)); hipHostMalloc((void **)&h_arr_B, nr_rows_B * nr_cols_B *BN_BIN*sizeof(const hipComplex*)); hipHostMalloc((void **)&h_arr_C, nr_rows_C * nr_cols_C *BN_BIN*sizeof(hipComplex*)); ///////////////////////////////////////////////////////////////////////// // Allocate memory for each batch in an array. for(int i = 0; i < BN_BIN; i++){ h_arr_A[i] = d_weights + i*nr_rows_A*nr_cols_A; h_arr_B[i] = d_data + i*nr_rows_B*nr_cols_B; h_arr_C[i] = d_beamformed + i*nr_rows_C*nr_cols_C; } // Allocate memory to arrays on device. 
cudaStat = hipMalloc((void **)&d_arr_A,nr_rows_A * nr_cols_A * BN_BIN * sizeof(hipComplex*)); assert(!cudaStat); cudaStat = hipMalloc((void **)&d_arr_B,nr_rows_B * nr_cols_B * BN_BIN * sizeof(hipComplex*)); assert(!cudaStat); cudaStat = hipMalloc((void **)&d_arr_C,nr_rows_C * nr_cols_C * BN_BIN * sizeof(hipComplex*)); assert(!cudaStat); // Copy memory from host to device. cudaStat = hipMemcpy(d_arr_A,h_arr_A,nr_rows_A * nr_cols_A * BN_BIN * sizeof(hipComplex*),hipMemcpyHostToDevice); assert(!cudaStat); cudaStat = hipMemcpy(d_arr_B,h_arr_B,nr_rows_B * nr_cols_B * BN_BIN * sizeof(hipComplex*),hipMemcpyHostToDevice); assert(!cudaStat); cudaStat = hipMemcpy(d_arr_C,h_arr_C,nr_rows_C * nr_cols_C * BN_BIN * sizeof(hipComplex*),hipMemcpyHostToDevice); assert(!cudaStat); //////////////////////////////////////////////////////////////////////////////// // CUDA streams applied for optimization to possibly eliminate stalls. // const int time_stream = 2000; // const int nStreamsB = nr_cols_B/time_stream; // Number of streams // const int streamSizeB = nr_rows_B*BN_BIN*time_stream; // const int streamBytesB = streamSizeB * sizeof(hipComplex*); // Create events and streams // //hipStream_t streamB[nStreamsB]; // hipStream_t streamCol[nStreamsB]; // for (int i = 0; i < nStreamsB; ++i) { // //hipStreamCreate(&streamB[i]); // hipStreamCreate(&streamCol[i]); // } // for (int i = 0; i < nStreamsB; ++i){ // int offset = i * streamSizeB; // //cudaStat = hipMemcpyAsync(&d_arr_B[offset], &h_arr_B[offset], streamBytesB, hipMemcpyHostToDevice, streamB[i]); // cudaStat = hipMemcpyAsync(&d_arr_B[offset], &h_arr_B[offset], streamBytesB, hipMemcpyHostToDevice, streamCol[i]); // assert(!cudaStat); // } // CUDA streams applied for optimization to possibly eliminate stalls. // const int nStreamsC = nr_cols_C/time_stream; // Number of streams // const int streamSizeC = nr_rows_C*BN_BIN*time_stream; // const int streamBytesC = streamSizeC * sizeof(hipComplex*); // Create events and streams // //hipStream_t streamC[nStreamsC]; // //for (int i = 0; i < nStreamsC; ++i) { // // hipStreamCreate(&streamC[i]); // //} // for (int i = 0; i < nStreamsC; ++i){ // int offset = i * streamSizeC; // //cudaStat = hipMemcpyAsync(&d_arr_C[offset], &h_arr_C[offset], streamBytesC, hipMemcpyHostToDevice, streamC[i]); // cudaStat = hipMemcpyAsync(&d_arr_C[offset], &h_arr_C[offset], streamBytesC, hipMemcpyHostToDevice, streamCol[i]); // assert(!cudaStat); // } // for (int i = 0; i < nStreamsC; ++i) { // hipStreamDestroy(streamCol[i]); // } /////////////////////////////////////////////////////////////////////////////// //free(h_arr_A); //free(h_arr_B); //free(h_arr_C); // Free pinned memory ///////////////////////////////////////////////////////// hipHostFree(h_arr_A); hipHostFree(h_arr_B); hipHostFree(h_arr_C); /////////////////////////////////////////////////////////////////////////////// return; } signed char * data_in(char * input_filename){ FILE * data; // File data pointers signed char * bf_data; // Complex data pointers // float complex * data_dc; // Allocate heap memory for file data //bf_data = (signed char *)malloc(2*BN_SAMP*sizeof(signed char)); // For pinned memory //////////////////////////////////////////////////// hipHostMalloc((void **)&bf_data, 2*BN_SAMP*sizeof(signed char)); ///////////////////////////////////////////////////////////////////////// //data_dc = (float complex *)malloc(BN_SAMP*sizeof(float complex *)); // Open files data = fopen(input_filename, "r"); /********************************************************* * Read 
in Data *********************************************************/ if (data != NULL) { fread(bf_data, sizeof(signed char), 2*BN_SAMP, data); /* int j; // Make 'em complex! for (j = 0; j < BN_SAMP; j++) { data_dc[j] = bf_data[2*j] + bf_data[(2*j)+1]*I; } */ // Specify grid and block dimensions // dim3 dimBlock_d(BN_ELE, 1, 1); // dim3 dimGrid_d(BN_TIME, BN_BIN, 1); //hipComplex * d_data_in = d_data1; //hipComplex * d_data_out = d_data; //hipMemcpy(d_data_in, data_dc, BN_SAMP*sizeof(hipComplex), hipMemcpyHostToDevice); // Restructure data for hipblasCgemmBatched function. //data_restructure<<<dimGrid_d, dimBlock_d>>>(d_data_in, d_data_out); fclose(data); } // Don't free pinned memory because it is this functions return value //// //hipHostFree(bf_data); ///////////////////////////////////////////////////////////////////////// //free(bf_data); //free(data_dc); return bf_data; } void beamform() { int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C; nr_rows_A = BN_BEAM; nr_cols_A = BN_ELE_BLOC; nr_rows_B = BN_ELE_BLOC; nr_cols_B = BN_TIME; nr_rows_C = BN_BEAM; // Leading dimensions are always the rows of each matrix since the data is stored in a column-wise order. int lda=nr_rows_A, ldb=nr_rows_B, ldc=nr_rows_C; hipComplex alf; hipComplex bet; alf.x = 1; alf.y = 0; bet.x = 0; bet.y = 0; int batchCount = BN_BIN; // There must be the same number of batches in each array. hipblasStatus_t stat; /* This function performs a matrix multiplication of the data and the weights. Weights - d_arr_A, Data - d_arr_B, and the output - d_arr_C. */ stat = hipblasCgemmBatched( handle, // handle to the cuBLAS library context. HIPBLAS_OP_N, // Operation on matrices within array A. HIPBLAS_OP_N, // Operation on matrices within array B. nr_rows_A, // Number of rows in matrix A and C. nr_cols_B, // Number of columns in matrix B and C. nr_cols_A, // Number of columns and rows in matrix A and B respectively. &alf, // Scalar used for multiplication. (const hipComplex **)d_arr_A, // Weight array of pointers. lda, // Leading dimension of each batch or matrix in array A. (const hipComplex **)d_arr_B, // Data array of pointers. ldb, // Leading dimension of each batch or matrix in array B. &bet, // Scalar used for multiplication. (hipComplex **)d_arr_C, // Output array of pointers. ldc, // Leading dimension of each batch or matrix in array C. batchCount); // Number of batches in each array. 
if (stat == HIPBLAS_STATUS_INVALID_VALUE) { printf("RTBF: Invalid CUBLAS values\n"); } else if (stat == HIPBLAS_STATUS_EXECUTION_FAILED) { printf("RTBF: Execution failed.\n"); } if(stat != HIPBLAS_STATUS_SUCCESS){ cerr << "hipblasCgemmBatched failed" << endl; exit(1); } assert(!hipGetLastError()); } __global__ void transpose(signed char* data, signed char* tra_data) { int i = threadIdx.x; int c = threadIdx.y; int m = blockIdx.x; int f = blockIdx.y; int t = blockIdx.z; //int Nm = gridDim.x; // number of mcnts (packets) int Nf = gridDim.y; // number of f-engines (ROACHES) int Nt = gridDim.z; // time samples per mcnt int Ni = blockDim.x; // inputs per f-engine (aka antenna elements per ROACH) int Nc = blockDim.y; // bins per mcnt int in_idx = i + Ni*c + Nc*Ni*t + Nt*Nc*Ni*f + Nf*Nt*Nc*Ni*m; int out_idx = i + Ni*f + Nf*Ni*c + Nc*Nf*Ni*t + Nt*Nc*Nf*Ni*m; tra_data[2*out_idx] = data[2*in_idx]; tra_data[2*out_idx + 1] = data[2*in_idx+1]; return; } __global__ void data_restructure(signed char * data, hipComplex * data_restruc){ //void data_restructure(signed char * data, hipComplex * data_restruc, int offset){ /* Repurpose the transpose thread in the hashpipe codes by performing the transpose in the GPU. The motivation was, why transpose then transpose again? Why not just perform one transpose in the GPU which would be faster anyway. */ int i = threadIdx.x; int c = threadIdx.y; int m = blockIdx.x; int f = blockIdx.y; int t = blockIdx.z; int Nm = gridDim.x; // number of mcnts (packets) int Nf = gridDim.y; // number of f-engines (ROACHES) int Nt = gridDim.z; // time samples per mcnt int Ni = blockDim.x; // inputs per f-engine (aka antenna elements per ROACH) int Nc = blockDim.y; // bins per mcnt // Offset for CUDA streams ////////////////////////////////////////////// //int in_idx = offset + (i + Ni*c + Nc*Ni*t + Nt*Nc*Ni*f + Nf*Nt*Nc*Ni*m); //int out_idx = offset + (i + Ni*f + Nf*Ni*t + Nt*Nf*Ni*m + Nm*Nt*Nf*Ni*c); ///////////////////////////////////////////////////////////////////////// int in_idx = i + Ni*c + Nc*Ni*t + Nt*Nc*Ni*f + Nf*Nt*Nc*Ni*m; int out_idx = i + Ni*f + Nf*Ni*t + Nt*Nf*Ni*m + Nm*Nt*Nf*Ni*c; data_restruc[out_idx].x = data[2*in_idx]*1.0f; data_restruc[out_idx].y = data[2*in_idx + 1]*1.0f; return; /* // Original Code int e = threadIdx.x; int t = blockIdx.x; int f = blockIdx.y; //Restructure data so that the frequency bin is the slowest moving index data_restruc[f*BN_TIME*BN_ELE_BLOC + t*BN_ELE_BLOC + e].x = data[2*(t*BN_BIN*BN_ELE_BLOC + f*BN_ELE_BLOC + e)]*1.0f; data_restruc[f*BN_TIME*BN_ELE_BLOC + t*BN_ELE_BLOC + e].y = data[2*(t*BN_BIN*BN_ELE_BLOC + f*BN_ELE_BLOC + e) + 1]*1.0f; return; */ } __global__ void sti_reduction(hipComplex * data_in, float * data_out) { int f = blockIdx.x; int b = blockIdx.y; int t = threadIdx.x; int s = blockIdx.z; int h = sample_idx(s*BN_TIME_STI + t,b,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (First set of beams) int h1 = sample_idx(s*BN_TIME_STI + t,b+BN_BEAM1,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (Last set of beams) // Temporary variables used for updating. float beam_power1; float beam_power2; float cross_power1; float cross_power2; cuFloatComplex samp1; cuFloatComplex samp2; float scale = 1.0/BN_TIME_STI; // Scale power by number of samples per STI window. __shared__ cuFloatComplex reduced_array1[BN_STI_BLOC]; __shared__ cuFloatComplex reduced_array[BN_STI_BLOC]; if (t < BN_TIME_STI) { // X polarization (XX*). 
samp1.x = data_in[h].x; samp1.y = data_in[h].y; beam_power1 = (samp1.x * samp1.x) + (samp1.y * samp1.y); // Beamformer output multiplied by its conjugate (absolute value squared). reduced_array[t].x = beam_power1; // Y polarization (YY*). samp2.x = data_in[h1].x; samp2.y = data_in[h1].y; beam_power2 = (samp2.x * samp2.x) + (samp2.y * samp2.y); // Beamformer output multiplied by its conjugate (absolute value squared). reduced_array[t].y = beam_power2; // Cross polarization (XY*). cross_power1 = (samp1.x * samp2.x) + (samp1.y * samp2.y); // Real part of cross polarization. cross_power2 = (samp1.y * samp2.x) - (samp1.x * samp2.y); // Imaginary part of cross polarization. reduced_array1[t].x = cross_power1; reduced_array1[t].y = cross_power2; } else{ reduced_array[t].x = 0.0; reduced_array[t].y = 0.0; reduced_array1[t].x = 0.0; reduced_array1[t].y = 0.0; } __syncthreads(); // Reduction is performed by splitting up the threads in each block and summing them all up. // The number of threads in each block needs to be a power of two in order for the reduction to work. (No left over threads). for(int k = blockDim.x/2; k>0; k>>=1){ if(t<k){ reduced_array[t].x += reduced_array[t+k].x; reduced_array[t].y += reduced_array[t+k].y; reduced_array1[t].x += reduced_array1[t+k].x; reduced_array1[t].y += reduced_array1[t+k].y; } __syncthreads(); } // After reduction is complete, assign each reduced to value to appropriate position in output array. if(t == 0){ data_out[output_idx(0,b,s,f)] = reduced_array[0].x*scale; // XX*. data_out[output_idx(1,b,s,f)] = reduced_array[0].y*scale; // YY*. data_out[output_idx(2,b,s,f)] = reduced_array1[0].x*scale; // XY* real. data_out[output_idx(3,b,s,f)] = reduced_array1[0].y*scale; // XY* imaginary. } return; } void run_beamformer(signed char * data_in, float * data_out) { hipError_t err_code; // Specify grid and block dimensions dim3 dimBlock(BN_STI_BLOC, 1, 1); dim3 dimGrid(BN_BIN, BN_BEAM1, BN_STI); // Specify grid and block dimensions dim3 dimBlock_d(BN_ELE_BLOC, 1, 1); dim3 dimGrid_d(BN_TIME, BN_BIN, 1); int Nm = 200; // Halfed for CUDA streams offset = 100; //int Nm = 400; int Nf = 8; int Nt = 20; int Nc = 25; //int Nc = 20; int Ni = 8; dim3 gridDim_transpose(Nm, Nf, Nt); dim3 blockDim_transpose(Ni, Nc, 1); signed char* d_tra_data_in = d_data1; //signed char* d_tra_data_out = d_data2; //signed char * d_restruct_in = d_data1; hipComplex * d_restruct_out = d_data; //hipMemcpy(d_restruct_in, data_in, 2*BN_SAMP*sizeof(signed char), hipMemcpyHostToDevice); // Previously the hipMemcpy below ///////////////////////////////////////////// hipMemcpy(d_tra_data_in, data_in, 2*BN_SAMP*sizeof(signed char), hipMemcpyHostToDevice); //////////////////////////////////////////////////////////////////////////////// // CUDA streams and events applied for optimization to possibly eliminate stalls. 
// const int time_streamIn = 2000; // const int nStreamsIn = BN_TIME/time_streamIn; // Number of streams // const int streamSizeIn = 2*BN_ELE_BLOC*BN_BIN*time_streamIn; // const int streamBytesIn = streamSizeIn * sizeof(signed char); // Create events and streams // Events //////////////////////////////////// // hipEvent_t startEvent, stopEvent; ////////////////////////////////////////////// // //hipStream_t streamIn[nStreamsIn]; // hipStream_t stream[nStreamsIn]; // Events //////////////////////////////////// // hipEventCreate(&startEvent); // hipEventCreate(&stopEvent); // hipEventRecord(startEvent, 0); ///////////////////////////////////////////// // for (int i = 0; i < nStreamsIn; ++i) { // //hipStreamCreate(&streamIn[i]); // hipStreamCreate(&stream[i]); //} // for (int i = 0; i < nStreamsIn; ++i){ // int offset = i * streamSizeIn; // //hipMemcpyAsync(&d_tra_data_in[offset], &data_in[offset], streamBytesIn, hipMemcpyHostToDevice, streamIn[i]); // hipMemcpyAsync(&d_tra_data_in[offset], &data_in[offset], streamBytesIn, hipMemcpyHostToDevice, stream[i]); // } // Events //////////////////////////////////// // hipEventRecord(stopEvent, 0); // hipEventSynchronize(stopEvent); ///////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// err_code = hipGetLastError(); if (err_code != hipSuccess) { printf("RTBF: hipMemcpy Failed: %s\n", hipGetErrorString(err_code)); } // Transpose the data // transpose<<<gridDim_transpose, blockDim_transpose>>>(d_tra_data_in, d_tra_data_out); // if (err_code != hipSuccess) { // printf("RTBF: CUDA Error (transpose): %s\n", hipGetErrorString(err_code)); // } // Restructure data for hipblasCgemmBatched function. // Original data_restructure() kernel ///////////////////////////////////////////////////// hipLaunchKernelGGL(( data_restructure), dim3(gridDim_transpose), dim3(blockDim_transpose), 0, 0, d_tra_data_in, d_restruct_out); /////////////////////////////////////////////////////////////////////////////////////////// //for (int i = 0; i < nStreamsIn; ++i) { // int offset = i * (streamSizeIn/2); // data_restructure<<<gridDim_transpose, blockDim_transpose, 0, streamIn[i]>>>(d_tra_data_in, d_restruct_out,offset); //} /////////////////////////////////////////////////////////////////////////////////////////// //data_restructure<<<gridDim_transpose, blockDim_transpose>>>(d_restruct_in, d_restruct_out); //data_restructure<<<dimGrid_d, dimBlock_d>>>(d_restruct_in, d_restruct_out); if (err_code != hipSuccess) { printf("RTBF: CUDA Error (data_restructure): %s\n", hipGetErrorString(err_code)); } // Call beamformer function containing hipblasCgemmBatched() beamform(); err_code = hipGetLastError(); if (err_code != hipSuccess) { printf("CUDA Error (beamform): %s\n", hipGetErrorString(err_code)); } hipComplex * d_sti_in = d_beamformed; float * d_sti_out = d_outputs; // Call STI reduction kernel. hipLaunchKernelGGL(( sti_reduction), dim3(dimGrid), dim3(dimBlock), 0, 0, d_sti_in, d_sti_out); err_code = hipGetLastError(); if (err_code != hipSuccess) { printf("CUDA Error (sti_reduction): %s\n", hipGetErrorString(err_code)); } // Copy output data from device to host. // Previously the hipMemcpy below ////////////////////////////////////////////// hipMemcpy(data_out, d_sti_out, BN_POL*(BN_OUTPUTS*sizeof(float)/2),hipMemcpyDeviceToHost); //////////////////////////////////////////////////////////////////////////////// // CUDA streams applied for optimization to possibly eliminate stalls. 
// const int time_streamOut = 50; // const int nStreamsOut = BN_STI/time_streamOut; // Number of streams // const int streamSizeOut = BN_POL*BN_BEAM1*BN_BIN*time_streamOut; // const int streamBytesOut = streamSizeOut * sizeof(float); // Create events and streams // //hipStream_t streamOut[nStreamsOut]; // //for (int i = 0; i < nStreamsOut; ++i) { // // hipStreamCreate(&streamOut[i]); // //} // Events //////////////////////////////////// // hipEventRecord(startEvent, 0); ///////////////////////////////////////////// // for (int i = 0; i < nStreamsOut; ++i){ // int offset = i * streamSizeOut; // //hipMemcpyAsync(&data_out[offset], &d_sti_out[offset], streamBytesOut,hipMemcpyDeviceToHost, streamOut[i]); // hipMemcpyAsync(&data_out[offset], &d_sti_out[offset], streamBytesOut,hipMemcpyDeviceToHost, stream[i]); // } // Events //////////////////////////////////// // hipEventRecord(stopEvent, 0); // hipEventSynchronize(stopEvent); ///////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Clean up streams // hipEventDestroy(startEvent); // hipEventDestroy(stopEvent); // //for (int i = 0; i < nStreamsIn; ++i) { // // hipStreamDestroy(streamIn[i]); // //} // for (int i = 0; i < nStreamsIn; ++i) { // hipStreamDestroy(stream[i]); // } // //for (int i = 0; i < nStreamsOut; ++i) { // // hipStreamDestroy(streamOut[i]); // //} ////////////////////////////////////////////////////////////////////////////// return; } void rtbfCleanup() { // Free up GPU memory at the end of a program if (d_beamformed != NULL) { hipFree(d_beamformed); } if (d_data != NULL) { hipFree(d_data); } if (d_data1 != NULL) { hipFree(d_data1); //hipHostFree(d_data1); // Clean up pinned memory (use hipHostFree() with hipHostMalloc()) } if (d_data2 != NULL) { hipFree(d_data2); } if (d_outputs != NULL) { hipFree(d_outputs); } if (d_weights != NULL) { hipFree(d_weights); //hipHostFree(d_weights); // Clean up pinned memory (use hipHostFree() with hipHostMalloc()) } if (d_arr_A != NULL) { hipFree(d_arr_A); } if (d_arr_B != NULL) { hipFree(d_arr_B); } if (d_arr_C != NULL) { hipFree(d_arr_C); } // Free up and release cublas handle hipblasDestroy(handle); }
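// ---------------------------------------------------------------------------
// Usage sketch (a minimal assumed calling sequence, kept as a comment): the
// file names "weights.bin" and "samples.bin" are hypothetical placeholders,
// and the output size mirrors the hipMemcpy in run_beamformer(). Note that
// init_beamformer() must run first so that d_weights exists before
// update_weights() copies into it.
// ---------------------------------------------------------------------------
// int example_usage() {
//     init_beamformer();                                    // allocate device buffers and the cuBLAS handle
//     update_weights((char *)"weights.bin");                // hypothetical weight file
//     signed char * input = data_in((char *)"samples.bin"); // pinned host buffer of 2*BN_SAMP bytes
//     float * output = (float *)malloc(BN_POL*(BN_OUTPUTS*sizeof(float)/2));
//     run_beamformer(input, output);                        // restructure -> hipblasCgemmBatched -> STI reduction
//     printf("XID: %llu\n", bf_get_xid());
//     hipHostFree(input);                                   // data_in() returns pinned memory
//     free(output);
//     rtbfCleanup();                                        // release device memory and the handle
//     return 0;
// }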
a3d0cf8aa7cc05262404dd115e3f4bd9cb9cdec9.cu
#include <stdio.h> #include <stdlib.h> #include <cstdlib> #include <curand.h> #include <assert.h> #include <unistd.h> #include <cublas_v2.h> #include <iostream> #include <complex.h> #include <math.h> #include <cuComplex.h> #include <cuda.h> #include <cuda_runtime.h> #include "cublas_beamformer.h" using namespace std; void beamform(); __global__ void transpose(signed char* data, signed char* tra_data); __global__ void data_restructure(signed char * data, cuComplex * data_restruc); __global__ void sti_reduction(cuComplex * data_in, float * data_out); // Struct defintion for beamformer metadata typedef struct bf_metadata_struct { float offsets[14]; char cal_filename[65]; char algorithm[65]; char weight_filename[65]; long long unsigned int xid; } bf_metadata; static bf_metadata my_metadata; static cuComplex * d_weights = NULL; void update_weights(char * filename){ printf("RTBF: In update_weights()...\n"); char weight_filename[128]; strcpy(weight_filename, filename); FILE * weights; float * bf_weights; float complex * weights_dc; float complex * weights_dc_n; // Allocate heap memory for file data //bf_weights = (float *)malloc(2*BN_WEIGHTS*sizeof(float)); //weights_dc = (float complex *)malloc(BN_WEIGHTS*sizeof(float complex *)); //weights_dc_n = (float complex *)malloc(BN_WEIGHTS*sizeof(float complex *)); // For pinned memory //////////////////////////////////////////////////// // Weights doesn't need pinned memory implemented because it is less than 16 MB /////////// cudaMallocHost((void **)&bf_weights,2*BN_WEIGHTS*sizeof(float complex *)); cudaMallocHost((void **)&weights_dc,BN_WEIGHTS*sizeof(float complex *)); cudaMallocHost((void **)&weights_dc_n,BN_WEIGHTS*sizeof(float complex *)); ///////////////////////////////////////////////////////////////////////// // open weight file weights = fopen(weight_filename, "r"); int j; if (weights != NULL) { fread(bf_weights, sizeof(float), 2*BN_WEIGHTS, weights); fread(my_metadata.offsets, sizeof(float), 14, weights); fread(my_metadata.cal_filename, sizeof(char), 64, weights); fread(my_metadata.algorithm, sizeof(char), 64, weights); fread(&(my_metadata.xid), sizeof(long long unsigned int), 1, weights); my_metadata.cal_filename[64] = '\0'; my_metadata.algorithm[64] = '\0'; // Extract all path information from weight_filename for metadata char * short_filename = strrchr(weight_filename, '/'); if (short_filename != NULL) { strcpy(my_metadata.weight_filename, short_filename+1); } else { strcpy(my_metadata.weight_filename, weight_filename); } // Convert to complex numbers (do a conjugate at the same time) for(j = 0; j < BN_WEIGHTS; j++){ weights_dc_n[j] = bf_weights[2*j] - bf_weights[(2*j)+1]*I; } // Transpose the weights int m,n; float complex transpose[BN_BEAM][BN_ELE_BLOC*BN_BIN]; for(m=0;m<BN_BEAM;m++){ for(n=0;n<BN_ELE_BLOC*BN_BIN;n++){ transpose[m][n] = weights_dc_n[m*BN_ELE_BLOC*BN_BIN + n]; } } for(n=0;n<BN_ELE_BLOC*BN_BIN;n++){ for(m=0;m<BN_BEAM;m++){ weights_dc[n*BN_BEAM+ m] = transpose[m][n]; } } fclose(weights); } // Copy weights to device cudaMemcpy(d_weights, weights_dc, BN_WEIGHTS*sizeof(cuComplex), cudaMemcpyHostToDevice); //r_weights instead of weights_dc //*BN_TIME // free memory //free(weights_dc); //free(weights_dc_n); //free(bf_weights); // Free pinned memory //////////////////////////////////////////////////// cudaFreeHost(weights_dc); cudaFreeHost(weights_dc_n); cudaFreeHost(bf_weights); ///////////////////////////////////////////////////////////////////////// return; } void bf_get_offsets(float * offsets){ for(int i = 0; i<BN_BEAM; 
i++){ offsets[i] = my_metadata.offsets[i]; } } void bf_get_cal_filename(char * cal_filename){ for(int i = 0; i< 65; i++){ cal_filename[i] = my_metadata.cal_filename[i]; } } void bf_get_algorithm(char * algorithm){ for(int i = 0; i< 65; i++){ algorithm[i] = my_metadata.algorithm[i]; } } void bf_get_weight_filename(char * weight_filename){ int num_chars = strlen(my_metadata.weight_filename); for (int i = 0; i < num_chars; i++) { weight_filename[i] = my_metadata.weight_filename[i]; } for (int i = num_chars; i < 64; i++) { weight_filename[i] = ' '; } weight_filename[64] = '\0'; } long long unsigned int bf_get_xid(){ return my_metadata.xid; } static cuComplex * d_beamformed = NULL; static cuComplex * d_data = NULL; static signed char * d_data1 = NULL; // Device memory for input data static signed char * d_data2 = NULL; static float * d_outputs; static cublasHandle_t handle; static cuComplex **d_arr_A = NULL; static cuComplex **d_arr_B = NULL; static cuComplex **d_arr_C = NULL; void init_beamformer(){ // Allocate memory for the weights, data, beamformer output, and sti output. cudaMalloc((void **)&d_weights, BN_WEIGHTS*sizeof(cuComplex)); //*BN_TIME cudaMalloc((void **)&d_data1, 2*BN_SAMP*sizeof(signed char)); //cudaMalloc((void **)&d_data2, 2*BN_SAMP*sizeof(signed char)); cudaMalloc((void **)&d_data, BN_SAMP*sizeof(cuComplex)); cudaError_t err_malloc = cudaMalloc((void **)&d_beamformed, BN_TBF*sizeof(cuComplex)); if (err_malloc != cudaSuccess) { printf("CUDA Error (cudaMalloc2): %s\n", cudaGetErrorString(err_malloc)); } cudaMalloc((void **)&d_outputs, BN_POL*(BN_OUTPUTS*sizeof(float)/2)); /********************************************************** * Create a handle for CUBLAS **********************************************************/ cublasCreate(&handle); cudaError_t cudaStat; int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; nr_rows_A = BN_BEAM; nr_cols_A = BN_ELE_BLOC; nr_rows_B = BN_ELE_BLOC; nr_cols_B = BN_TIME; nr_rows_C = BN_BEAM; nr_cols_C = BN_TIME; // Allocate memory to host arrays - This is all memory allocated to arrays that are used by gemmBatched. Allocate 3 arrays on CPU const cuComplex **h_arr_A = 0; const cuComplex **h_arr_B = 0; cuComplex **h_arr_C = 0; //h_arr_A = (const cuComplex **)malloc(nr_rows_A * nr_cols_A *BN_BIN*sizeof(const cuComplex*)); //h_arr_B = (const cuComplex **)malloc(nr_rows_B * nr_cols_B *BN_BIN*sizeof(const cuComplex*)); //h_arr_C = (cuComplex **)malloc(nr_rows_C * nr_cols_C *BN_BIN*sizeof(cuComplex*)); ///////////////////////////////////////////////////////////////////////// cudaMallocHost((void **)&h_arr_A, nr_rows_A * nr_cols_A *BN_BIN*sizeof(const cuComplex*)); cudaMallocHost((void **)&h_arr_B, nr_rows_B * nr_cols_B *BN_BIN*sizeof(const cuComplex*)); cudaMallocHost((void **)&h_arr_C, nr_rows_C * nr_cols_C *BN_BIN*sizeof(cuComplex*)); ///////////////////////////////////////////////////////////////////////// // Allocate memory for each batch in an array. for(int i = 0; i < BN_BIN; i++){ h_arr_A[i] = d_weights + i*nr_rows_A*nr_cols_A; h_arr_B[i] = d_data + i*nr_rows_B*nr_cols_B; h_arr_C[i] = d_beamformed + i*nr_rows_C*nr_cols_C; } // Allocate memory to arrays on device. 
cudaStat = cudaMalloc((void **)&d_arr_A,nr_rows_A * nr_cols_A * BN_BIN * sizeof(cuComplex*)); assert(!cudaStat); cudaStat = cudaMalloc((void **)&d_arr_B,nr_rows_B * nr_cols_B * BN_BIN * sizeof(cuComplex*)); assert(!cudaStat); cudaStat = cudaMalloc((void **)&d_arr_C,nr_rows_C * nr_cols_C * BN_BIN * sizeof(cuComplex*)); assert(!cudaStat); // Copy memory from host to device. cudaStat = cudaMemcpy(d_arr_A,h_arr_A,nr_rows_A * nr_cols_A * BN_BIN * sizeof(cuComplex*),cudaMemcpyHostToDevice); assert(!cudaStat); cudaStat = cudaMemcpy(d_arr_B,h_arr_B,nr_rows_B * nr_cols_B * BN_BIN * sizeof(cuComplex*),cudaMemcpyHostToDevice); assert(!cudaStat); cudaStat = cudaMemcpy(d_arr_C,h_arr_C,nr_rows_C * nr_cols_C * BN_BIN * sizeof(cuComplex*),cudaMemcpyHostToDevice); assert(!cudaStat); //////////////////////////////////////////////////////////////////////////////// // CUDA streams applied for optimization to possibly eliminate stalls. // const int time_stream = 2000; // const int nStreamsB = nr_cols_B/time_stream; // Number of streams // const int streamSizeB = nr_rows_B*BN_BIN*time_stream; // const int streamBytesB = streamSizeB * sizeof(cuComplex*); // Create events and streams // //cudaStream_t streamB[nStreamsB]; // cudaStream_t streamCol[nStreamsB]; // for (int i = 0; i < nStreamsB; ++i) { // //cudaStreamCreate(&streamB[i]); // cudaStreamCreate(&streamCol[i]); // } // for (int i = 0; i < nStreamsB; ++i){ // int offset = i * streamSizeB; // //cudaStat = cudaMemcpyAsync(&d_arr_B[offset], &h_arr_B[offset], streamBytesB, cudaMemcpyHostToDevice, streamB[i]); // cudaStat = cudaMemcpyAsync(&d_arr_B[offset], &h_arr_B[offset], streamBytesB, cudaMemcpyHostToDevice, streamCol[i]); // assert(!cudaStat); // } // CUDA streams applied for optimization to possibly eliminate stalls. 
// const int nStreamsC = nr_cols_C/time_stream; // Number of streams // const int streamSizeC = nr_rows_C*BN_BIN*time_stream; // const int streamBytesC = streamSizeC * sizeof(cuComplex*); // Create events and streams // //cudaStream_t streamC[nStreamsC]; // //for (int i = 0; i < nStreamsC; ++i) { // // cudaStreamCreate(&streamC[i]); // //} // for (int i = 0; i < nStreamsC; ++i){ // int offset = i * streamSizeC; // //cudaStat = cudaMemcpyAsync(&d_arr_C[offset], &h_arr_C[offset], streamBytesC, cudaMemcpyHostToDevice, streamC[i]); // cudaStat = cudaMemcpyAsync(&d_arr_C[offset], &h_arr_C[offset], streamBytesC, cudaMemcpyHostToDevice, streamCol[i]); // assert(!cudaStat); // } // for (int i = 0; i < nStreamsC; ++i) { // cudaStreamDestroy(streamCol[i]); // } /////////////////////////////////////////////////////////////////////////////// //free(h_arr_A); //free(h_arr_B); //free(h_arr_C); // Free pinned memory ///////////////////////////////////////////////////////// cudaFreeHost(h_arr_A); cudaFreeHost(h_arr_B); cudaFreeHost(h_arr_C); /////////////////////////////////////////////////////////////////////////////// return; } signed char * data_in(char * input_filename){ FILE * data; // File data pointers signed char * bf_data; // Complex data pointers // float complex * data_dc; // Allocate heap memory for file data //bf_data = (signed char *)malloc(2*BN_SAMP*sizeof(signed char)); // For pinned memory //////////////////////////////////////////////////// cudaMallocHost((void **)&bf_data, 2*BN_SAMP*sizeof(signed char)); ///////////////////////////////////////////////////////////////////////// //data_dc = (float complex *)malloc(BN_SAMP*sizeof(float complex *)); // Open files data = fopen(input_filename, "r"); /********************************************************* * Read in Data *********************************************************/ if (data != NULL) { fread(bf_data, sizeof(signed char), 2*BN_SAMP, data); /* int j; // Make 'em complex! for (j = 0; j < BN_SAMP; j++) { data_dc[j] = bf_data[2*j] + bf_data[(2*j)+1]*I; } */ // Specify grid and block dimensions // dim3 dimBlock_d(BN_ELE, 1, 1); // dim3 dimGrid_d(BN_TIME, BN_BIN, 1); //cuComplex * d_data_in = d_data1; //cuComplex * d_data_out = d_data; //cudaMemcpy(d_data_in, data_dc, BN_SAMP*sizeof(cuComplex), cudaMemcpyHostToDevice); // Restructure data for cublasCgemmBatched function. //data_restructure<<<dimGrid_d, dimBlock_d>>>(d_data_in, d_data_out); fclose(data); } // Don't free pinned memory because it is this functions return value //// //cudaFreeHost(bf_data); ///////////////////////////////////////////////////////////////////////// //free(bf_data); //free(data_dc); return bf_data; } void beamform() { int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C; nr_rows_A = BN_BEAM; nr_cols_A = BN_ELE_BLOC; nr_rows_B = BN_ELE_BLOC; nr_cols_B = BN_TIME; nr_rows_C = BN_BEAM; // Leading dimensions are always the rows of each matrix since the data is stored in a column-wise order. int lda=nr_rows_A, ldb=nr_rows_B, ldc=nr_rows_C; cuComplex alf; cuComplex bet; alf.x = 1; alf.y = 0; bet.x = 0; bet.y = 0; int batchCount = BN_BIN; // There must be the same number of batches in each array. cublasStatus_t stat; /* This function performs a matrix multiplication of the data and the weights. Weights - d_arr_A, Data - d_arr_B, and the output - d_arr_C. */ stat = cublasCgemmBatched( handle, // handle to the cuBLAS library context. CUBLAS_OP_N, // Operation on matrices within array A. CUBLAS_OP_N, // Operation on matrices within array B. 
nr_rows_A, // Number of rows in matrix A and C. nr_cols_B, // Number of columns in matrix B and C. nr_cols_A, // Number of columns and rows in matrix A and B respectively. &alf, // Scalar used for multiplication. (const cuComplex **)d_arr_A, // Weight array of pointers. lda, // Leading dimension of each batch or matrix in array A. (const cuComplex **)d_arr_B, // Data array of pointers. ldb, // Leading dimension of each batch or matrix in array B. &bet, // Scalar used for multiplication. (cuComplex **)d_arr_C, // Output array of pointers. ldc, // Leading dimension of each batch or matrix in array C. batchCount); // Number of batches in each array. if (stat == CUBLAS_STATUS_INVALID_VALUE) { printf("RTBF: Invalid CUBLAS values\n"); } else if (stat == CUBLAS_STATUS_EXECUTION_FAILED) { printf("RTBF: Execution failed.\n"); } if(stat != CUBLAS_STATUS_SUCCESS){ cerr << "cublasCgemmBatched failed" << endl; exit(1); } assert(!cudaGetLastError()); } __global__ void transpose(signed char* data, signed char* tra_data) { int i = threadIdx.x; int c = threadIdx.y; int m = blockIdx.x; int f = blockIdx.y; int t = blockIdx.z; //int Nm = gridDim.x; // number of mcnts (packets) int Nf = gridDim.y; // number of f-engines (ROACHES) int Nt = gridDim.z; // time samples per mcnt int Ni = blockDim.x; // inputs per f-engine (aka antenna elements per ROACH) int Nc = blockDim.y; // bins per mcnt int in_idx = i + Ni*c + Nc*Ni*t + Nt*Nc*Ni*f + Nf*Nt*Nc*Ni*m; int out_idx = i + Ni*f + Nf*Ni*c + Nc*Nf*Ni*t + Nt*Nc*Nf*Ni*m; tra_data[2*out_idx] = data[2*in_idx]; tra_data[2*out_idx + 1] = data[2*in_idx+1]; return; } __global__ void data_restructure(signed char * data, cuComplex * data_restruc){ //void data_restructure(signed char * data, cuComplex * data_restruc, int offset){ /* Repurpose the transpose thread in the hashpipe codes by performing the transpose in the GPU. The motivation was, why transpose then transpose again? Why not just perform one transpose in the GPU which would be faster anyway. */ int i = threadIdx.x; int c = threadIdx.y; int m = blockIdx.x; int f = blockIdx.y; int t = blockIdx.z; int Nm = gridDim.x; // number of mcnts (packets) int Nf = gridDim.y; // number of f-engines (ROACHES) int Nt = gridDim.z; // time samples per mcnt int Ni = blockDim.x; // inputs per f-engine (aka antenna elements per ROACH) int Nc = blockDim.y; // bins per mcnt // Offset for CUDA streams ////////////////////////////////////////////// //int in_idx = offset + (i + Ni*c + Nc*Ni*t + Nt*Nc*Ni*f + Nf*Nt*Nc*Ni*m); //int out_idx = offset + (i + Ni*f + Nf*Ni*t + Nt*Nf*Ni*m + Nm*Nt*Nf*Ni*c); ///////////////////////////////////////////////////////////////////////// int in_idx = i + Ni*c + Nc*Ni*t + Nt*Nc*Ni*f + Nf*Nt*Nc*Ni*m; int out_idx = i + Ni*f + Nf*Ni*t + Nt*Nf*Ni*m + Nm*Nt*Nf*Ni*c; data_restruc[out_idx].x = data[2*in_idx]*1.0f; data_restruc[out_idx].y = data[2*in_idx + 1]*1.0f; return; /* // Original Code int e = threadIdx.x; int t = blockIdx.x; int f = blockIdx.y; //Restructure data so that the frequency bin is the slowest moving index data_restruc[f*BN_TIME*BN_ELE_BLOC + t*BN_ELE_BLOC + e].x = data[2*(t*BN_BIN*BN_ELE_BLOC + f*BN_ELE_BLOC + e)]*1.0f; data_restruc[f*BN_TIME*BN_ELE_BLOC + t*BN_ELE_BLOC + e].y = data[2*(t*BN_BIN*BN_ELE_BLOC + f*BN_ELE_BLOC + e) + 1]*1.0f; return; */ } __global__ void sti_reduction(cuComplex * data_in, float * data_out) { int f = blockIdx.x; int b = blockIdx.y; int t = threadIdx.x; int s = blockIdx.z; int h = sample_idx(s*BN_TIME_STI + t,b,f); // Preprocessor macro used for the output of the beamformer. 
More detail can be seen in the header file. (First set of beams) int h1 = sample_idx(s*BN_TIME_STI + t,b+BN_BEAM1,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (Last set of beams) // Temporary variables used for updating. float beam_power1; float beam_power2; float cross_power1; float cross_power2; cuFloatComplex samp1; cuFloatComplex samp2; float scale = 1.0/BN_TIME_STI; // Scale power by number of samples per STI window. __shared__ cuFloatComplex reduced_array1[BN_STI_BLOC]; __shared__ cuFloatComplex reduced_array[BN_STI_BLOC]; if (t < BN_TIME_STI) { // X polarization (XX*). samp1.x = data_in[h].x; samp1.y = data_in[h].y; beam_power1 = (samp1.x * samp1.x) + (samp1.y * samp1.y); // Beamformer output multiplied by its conjugate (absolute value squared). reduced_array[t].x = beam_power1; // Y polarization (YY*). samp2.x = data_in[h1].x; samp2.y = data_in[h1].y; beam_power2 = (samp2.x * samp2.x) + (samp2.y * samp2.y); // Beamformer output multiplied by its conjugate (absolute value squared). reduced_array[t].y = beam_power2; // Cross polarization (XY*). cross_power1 = (samp1.x * samp2.x) + (samp1.y * samp2.y); // Real part of cross polarization. cross_power2 = (samp1.y * samp2.x) - (samp1.x * samp2.y); // Imaginary part of cross polarization. reduced_array1[t].x = cross_power1; reduced_array1[t].y = cross_power2; } else{ reduced_array[t].x = 0.0; reduced_array[t].y = 0.0; reduced_array1[t].x = 0.0; reduced_array1[t].y = 0.0; } __syncthreads(); // Reduction is performed by splitting up the threads in each block and summing them all up. // The number of threads in each block needs to be a power of two in order for the reduction to work. (No left over threads). for(int k = blockDim.x/2; k>0; k>>=1){ if(t<k){ reduced_array[t].x += reduced_array[t+k].x; reduced_array[t].y += reduced_array[t+k].y; reduced_array1[t].x += reduced_array1[t+k].x; reduced_array1[t].y += reduced_array1[t+k].y; } __syncthreads(); } // After reduction is complete, assign each reduced to value to appropriate position in output array. if(t == 0){ data_out[output_idx(0,b,s,f)] = reduced_array[0].x*scale; // XX*. data_out[output_idx(1,b,s,f)] = reduced_array[0].y*scale; // YY*. data_out[output_idx(2,b,s,f)] = reduced_array1[0].x*scale; // XY* real. data_out[output_idx(3,b,s,f)] = reduced_array1[0].y*scale; // XY* imaginary. } return; } void run_beamformer(signed char * data_in, float * data_out) { cudaError_t err_code; // Specify grid and block dimensions dim3 dimBlock(BN_STI_BLOC, 1, 1); dim3 dimGrid(BN_BIN, BN_BEAM1, BN_STI); // Specify grid and block dimensions dim3 dimBlock_d(BN_ELE_BLOC, 1, 1); dim3 dimGrid_d(BN_TIME, BN_BIN, 1); int Nm = 200; // Halfed for CUDA streams offset = 100; //int Nm = 400; int Nf = 8; int Nt = 20; int Nc = 25; //int Nc = 20; int Ni = 8; dim3 gridDim_transpose(Nm, Nf, Nt); dim3 blockDim_transpose(Ni, Nc, 1); signed char* d_tra_data_in = d_data1; //signed char* d_tra_data_out = d_data2; //signed char * d_restruct_in = d_data1; cuComplex * d_restruct_out = d_data; //cudaMemcpy(d_restruct_in, data_in, 2*BN_SAMP*sizeof(signed char), cudaMemcpyHostToDevice); // Previously the cudaMemcpy below ///////////////////////////////////////////// cudaMemcpy(d_tra_data_in, data_in, 2*BN_SAMP*sizeof(signed char), cudaMemcpyHostToDevice); //////////////////////////////////////////////////////////////////////////////// // CUDA streams and events applied for optimization to possibly eliminate stalls. 
// const int time_streamIn = 2000; // const int nStreamsIn = BN_TIME/time_streamIn; // Number of streams // const int streamSizeIn = 2*BN_ELE_BLOC*BN_BIN*time_streamIn; // const int streamBytesIn = streamSizeIn * sizeof(signed char); // Create events and streams // Events //////////////////////////////////// // cudaEvent_t startEvent, stopEvent; ////////////////////////////////////////////// // //cudaStream_t streamIn[nStreamsIn]; // cudaStream_t stream[nStreamsIn]; // Events //////////////////////////////////// // cudaEventCreate(&startEvent); // cudaEventCreate(&stopEvent); // cudaEventRecord(startEvent, 0); ///////////////////////////////////////////// // for (int i = 0; i < nStreamsIn; ++i) { // //cudaStreamCreate(&streamIn[i]); // cudaStreamCreate(&stream[i]); //} // for (int i = 0; i < nStreamsIn; ++i){ // int offset = i * streamSizeIn; // //cudaMemcpyAsync(&d_tra_data_in[offset], &data_in[offset], streamBytesIn, cudaMemcpyHostToDevice, streamIn[i]); // cudaMemcpyAsync(&d_tra_data_in[offset], &data_in[offset], streamBytesIn, cudaMemcpyHostToDevice, stream[i]); // } // Events //////////////////////////////////// // cudaEventRecord(stopEvent, 0); // cudaEventSynchronize(stopEvent); ///////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// err_code = cudaGetLastError(); if (err_code != cudaSuccess) { printf("RTBF: cudaMemcpy Failed: %s\n", cudaGetErrorString(err_code)); } // Transpose the data // transpose<<<gridDim_transpose, blockDim_transpose>>>(d_tra_data_in, d_tra_data_out); // if (err_code != cudaSuccess) { // printf("RTBF: CUDA Error (transpose): %s\n", cudaGetErrorString(err_code)); // } // Restructure data for cublasCgemmBatched function. // Original data_restructure() kernel ///////////////////////////////////////////////////// data_restructure<<<gridDim_transpose, blockDim_transpose>>>(d_tra_data_in, d_restruct_out); /////////////////////////////////////////////////////////////////////////////////////////// //for (int i = 0; i < nStreamsIn; ++i) { // int offset = i * (streamSizeIn/2); // data_restructure<<<gridDim_transpose, blockDim_transpose, 0, streamIn[i]>>>(d_tra_data_in, d_restruct_out,offset); //} /////////////////////////////////////////////////////////////////////////////////////////// //data_restructure<<<gridDim_transpose, blockDim_transpose>>>(d_restruct_in, d_restruct_out); //data_restructure<<<dimGrid_d, dimBlock_d>>>(d_restruct_in, d_restruct_out); if (err_code != cudaSuccess) { printf("RTBF: CUDA Error (data_restructure): %s\n", cudaGetErrorString(err_code)); } // Call beamformer function containing cublasCgemmBatched() beamform(); err_code = cudaGetLastError(); if (err_code != cudaSuccess) { printf("CUDA Error (beamform): %s\n", cudaGetErrorString(err_code)); } cuComplex * d_sti_in = d_beamformed; float * d_sti_out = d_outputs; // Call STI reduction kernel. sti_reduction<<<dimGrid, dimBlock>>>(d_sti_in, d_sti_out); err_code = cudaGetLastError(); if (err_code != cudaSuccess) { printf("CUDA Error (sti_reduction): %s\n", cudaGetErrorString(err_code)); } // Copy output data from device to host. // Previously the cudaMemcpy below ////////////////////////////////////////////// cudaMemcpy(data_out, d_sti_out, BN_POL*(BN_OUTPUTS*sizeof(float)/2),cudaMemcpyDeviceToHost); //////////////////////////////////////////////////////////////////////////////// // CUDA streams applied for optimization to possibly eliminate stalls. 
// const int time_streamOut = 50; // const int nStreamsOut = BN_STI/time_streamOut; // Number of streams // const int streamSizeOut = BN_POL*BN_BEAM1*BN_BIN*time_streamOut; // const int streamBytesOut = streamSizeOut * sizeof(float); // Create events and streams // //cudaStream_t streamOut[nStreamsOut]; // //for (int i = 0; i < nStreamsOut; ++i) { // // cudaStreamCreate(&streamOut[i]); // //} // Events //////////////////////////////////// // cudaEventRecord(startEvent, 0); ///////////////////////////////////////////// // for (int i = 0; i < nStreamsOut; ++i){ // int offset = i * streamSizeOut; // //cudaMemcpyAsync(&data_out[offset], &d_sti_out[offset], streamBytesOut,cudaMemcpyDeviceToHost, streamOut[i]); // cudaMemcpyAsync(&data_out[offset], &d_sti_out[offset], streamBytesOut,cudaMemcpyDeviceToHost, stream[i]); // } // Events //////////////////////////////////// // cudaEventRecord(stopEvent, 0); // cudaEventSynchronize(stopEvent); ///////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Clean up streams // cudaEventDestroy(startEvent); // cudaEventDestroy(stopEvent); // //for (int i = 0; i < nStreamsIn; ++i) { // // cudaStreamDestroy(streamIn[i]); // //} // for (int i = 0; i < nStreamsIn; ++i) { // cudaStreamDestroy(stream[i]); // } // //for (int i = 0; i < nStreamsOut; ++i) { // // cudaStreamDestroy(streamOut[i]); // //} ////////////////////////////////////////////////////////////////////////////// return; } void rtbfCleanup() { // Free up GPU memory at the end of a program if (d_beamformed != NULL) { cudaFree(d_beamformed); } if (d_data != NULL) { cudaFree(d_data); } if (d_data1 != NULL) { cudaFree(d_data1); //cudaFreeHost(d_data1); // Clean up pinned memory (use cudaFreeHost() with cudaMallocHost()) } if (d_data2 != NULL) { cudaFree(d_data2); } if (d_outputs != NULL) { cudaFree(d_outputs); } if (d_weights != NULL) { cudaFree(d_weights); //cudaFreeHost(d_weights); // Clean up pinned memory (use cudaFreeHost() with cudaMallocHost()) } if (d_arr_A != NULL) { cudaFree(d_arr_A); } if (d_arr_B != NULL) { cudaFree(d_arr_B); } if (d_arr_C != NULL) { cudaFree(d_arr_C); } // Free up and release cublas handle cublasDestroy(handle); }
f16b6ea31634ad90d8a51e6c1e5133f6e682555e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by Zayd Hammoudeh on 10/26/20. // #include <cassert> #include <cfloat> #include <hiprand/hiprand_kernel.h> #include <list> #include <omp.h> #include "base_pso.h" #include "cuda_config.cuh" #include "cuda_pso.cuh" #include "types_general.h" #define PARALLEL_MAX_REDUCE __global__ void initRand(hiprandState_t * state) { IntType idx = blockIdx.x * blockDim.x + threadIdx.x; hiprand_init(clock64(), idx, 0, &state[idx]); } __global__ void vecAdd(CudaMat v, CudaParam scalar) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; v[id] += scalar; } __global__ void vecScale(CudaMat v, CudaParam scalar) { // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds v[id] *= scalar; } __global__ void vecCwiseProd(CudaMat dest, const CudaMat other) { // NOLINT(readability-non-const-parameter,misc-misplaced-const) // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds dest[id] *= other[id]; } __global__ void vecDiff(CudaMat dest, const CudaMat left, const CudaMat right) { // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds dest[id] = left[id] - right[id]; } __global__ void vecClip(CudaMat v, CudaParam bound_lo, CudaParam bound_hi) { // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; v[id] = fmax(bound_lo, fmin(v[id], bound_hi)); } /** y = aX + y */ __global__ void vecSaxpy(CudaMat y, CudaParam a, const CudaMat x) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; y[id] += a * x[id]; } __global__ void vecRand(hiprandState_t * state, CudaMat v) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; #if CudaParam == double v[id] = hiprand_uniform_double(state + id); #elif CudaParam == float v[id] = hiprand_uniform(&state[id]); #else assert(false); #endif } __global__ void vecRandFast(IntType * prngs, CudaMat v, CudaParam lo, CudaParam diff) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; prngs[id] = 1103515245 * prngs[id] + 12345; v[id] = lo + diff * (prngs[id] & 0x3FFFFFFF) / 0x3FFFFFFF; } /** * Uses the best vector position and calculates the relative position for use in calculating * the velocity. 
* @param dest Location to store the position * @param best Best overall position so far * @param parts_ Vector of all particle locations * @param n_part Number of particles */ __global__ void vecBestDiff(CudaMat dest, CudaMat best, CudaMat parts_) { IntType idx = blockIdx.x * blockDim.x + threadIdx.x; dest[idx] = best[threadIdx.x] - parts_[idx]; } __global__ void vecCombine(CudaMat parts, CudaMat velos, CudaMat const tmp_p, CudaMat const tmp_g, const CudaParam momentum, const CudaParam bound_lo, const CudaParam bound_hi, const CudaParam lr) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; CudaParam diff = bound_hi - bound_lo; velos[id] = momentum * velos[id] + tmp_p[id] + tmp_g[id]; parts[id] += lr * velos[id]; velos[id] = fmax(-diff, fmin(velos[id], diff)); parts[id] = fmax(bound_lo, fmin(parts[id], bound_hi)); } __global__ void vecPoint(CudaMat tmp, CudaMat const pos_best, CudaMat const parts, CudaMat const r_p, const CudaParam rate_point) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; tmp[id] = rate_point * r_p[id] * (pos_best[id] - parts[id]); } __global__ void vecGlobal(CudaMat tmp, CudaMat const best_gpu, CudaMat const parts, CudaMat const r_g, const CudaParam rate_global) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; tmp[id] = rate_global * r_g[id] * (best_gpu[threadIdx.x] - parts[id]); } __global__ void updatePosBest(CudaMat parts, CudaMat parts_loss, CudaMat pos_best, // NOLINT(misc-misplaced-const,readability-non-const-parameter) CudaMat pos_best_loss) { IntType idx = blockIdx.x; // Update the best position for the part if (pos_best_loss[idx] > parts_loss[idx]) { IntType tid = threadIdx.x; IntType offset = idx * blockDim.x + tid; pos_best[offset] = parts[offset]; __syncthreads(); if (tid == 0) pos_best_loss[idx] = parts_loss[idx]; } } #ifdef PARALLEL_MAX_REDUCE __global__ void updateBest(CudaMat pos_best, CudaMat pos_best_loss, CudaMat best, CudaMat best_loss, IntType n_part, IntType dim) { unsigned int n_threads = blockDim.x; unsigned int tid = threadIdx.x; extern __shared__ float shared[]; CudaParam * all_loss = shared; int * best_idx = (int *)&shared[n_threads]; CudaParam t_loss = *best_loss; IntType b_idx = -1; for (IntType idx = tid; idx < n_part; idx += n_threads) { if (t_loss > pos_best_loss[idx]) { t_loss = pos_best_loss[idx]; b_idx = idx; } } all_loss[tid] = t_loss; best_idx[tid] = b_idx; for (unsigned int s = n_threads / 2; s > 0; s>>=1) { __syncthreads(); if (tid < s) { if (all_loss[tid] > all_loss[tid + s]) { all_loss[tid] = all_loss[tid + s]; best_idx[tid] = best_idx[tid + s]; } } } // Copy the best overall result only once if (tid == 0 && best_idx[0] >= 0) { IntType offset = best_idx[0] * dim; *best_loss = all_loss[0]; for (IntType j = 0; j < dim; j++) best[j] = pos_best[j+offset]; } } #else __global__ void updateBest(CudaMat pos_best, CudaMat pos_best_loss, CudaMat best, CudaMat best_loss, IntType n_part, IntType dim) { IntType best_idx; bool best_found = false; // Used to queue the best result for (IntType idx = 0; idx < n_part; idx++) { // Update the best overall position if (best_loss[0] > pos_best_loss[idx]) { best_loss[0] = pos_best_loss[idx]; best_idx = idx; best_found = true; } } // Copy the best overall result only once if (best_found) { IntType offset = best_idx * dim; for (IntType j = 0; j < dim; j++) best[j] = pos_best[j+offset]; } } #endif void CudaPSO::seedRngs() { if (this->fast_rand_) { std::seed_seq seq{time(nullptr)}; std::vector<std::uint32_t> seeds(this->tot_len_); seq.generate(seeds.begin(), seeds.end()); // Place the random 
seeds on the device IntType n_bytes = this->tot_len_ * sizeof(IntType); hipMalloc(&this->fast_rng_, n_bytes); hipMemcpy(this->fast_rng_, seeds.data(), n_bytes, hipMemcpyHostToDevice); } else { hipMalloc(&this->rng_, this->tot_len_ * sizeof(hiprandState_t)); hipLaunchKernelGGL(( initRand), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->rng_); } } void CudaPSO::randomize(CudaMat vec, IntType n, CudaParam bound_lo, CudaParam bound_hi, hipStream_t * stream = nullptr) { CudaParam diff = bound_hi - bound_lo; if (this->fast_rand_) { if (!stream) { hipLaunchKernelGGL(( vecRandFast), dim3(DEFAULT_N_), dim3(M_), 0, 0, this->fast_rng_, vec, bound_lo, diff); } else { hipLaunchKernelGGL(( vecRandFast), dim3(DEFAULT_N_),dim3(M_),0,*stream, this->fast_rng_, vec, bound_lo, diff); } } else { if (!stream) { hipLaunchKernelGGL(( vecRand), dim3(DEFAULT_N_), dim3(M_), 0, 0, this->rng_, vec); if (diff != 1.) hipLaunchKernelGGL(( vecScale), dim3(DEFAULT_N_), dim3(M_), 0, 0, vec, diff); if (bound_lo != 0.) hipLaunchKernelGGL(( vecAdd), dim3(DEFAULT_N_), dim3(M_), 0, 0, vec, bound_lo); } else { hipLaunchKernelGGL(( vecRand), dim3(DEFAULT_N_),dim3(M_),0,*stream, this->rng_, vec); if (diff != 1.) hipLaunchKernelGGL(( vecScale), dim3(DEFAULT_N_),dim3(M_),0,*stream, vec, diff); if (bound_lo != 0.) hipLaunchKernelGGL(( vecAdd), dim3(DEFAULT_N_),dim3(M_),0,*stream, vec, bound_lo); } } } CudaPSO::CudaPSO(CudaConfig *config, const bool sep_kernel, const bool fast_rand) : BasePSO<CudaParam, CudaLoss>(config), tot_len_(config->n_particle() * config->dim()), M_(config->dim()), DEFAULT_N_(config->n_particle()), sep_kernel_(sep_kernel), fast_rand_(fast_rand) { assert(M_ > 0); assert(DEFAULT_N_ > 0); // Allocate the random number generator memory this->seedRngs(); this->best_ = new CudaParam[this->config_->dim()]; IntType tot_len_bytes = this->tot_len_ * (sizeof(int) + sizeof(CudaParam)); // Particle and velocity information hipMalloc(&this->parts_, tot_len_bytes); this->randomize(this->parts_, this->tot_len_, this->config_->bound_lo(), this->config_->bound_hi()); hipMalloc(&this->velos_, tot_len_bytes); CudaParam v_max = this->config_->bound_hi() - this->config_->bound_lo(); this->randomize(this->velos_, this->tot_len_, -v_max, v_max); } void CudaPSO::calcLossAndUpdate(IntType itr, CudaMat tmp_scratch, CudaMat pos_best, CudaMat pos_best_losses, CudaMat best_gpu, CudaMat best_loss_gpu) { CudaLoss loss = this->config_->loss_func(); const IntType bl_tr = 32; const IntType n_part_blocks = (this->config_->n_particle() + bl_tr + 1) / bl_tr; hipLaunchKernelGGL(( loss), dim3(n_part_blocks), dim3(bl_tr), 0, 0, tmp_scratch, this->parts_, this->config_->n_particle(), this->config_->dim(), this->config_->n_ele(), this->config_->ext_data(), this->config_->ext_labels()); hipLaunchKernelGGL(( updatePosBest), dim3(DEFAULT_N_), dim3(M_), 0, 0, this->parts_, tmp_scratch, pos_best, pos_best_losses); #ifdef PARALLEL_MAX_REDUCE int n_threads = ::min(64, (int)log2((float)this->config_->n_particle())); unsigned int shared = n_threads * (sizeof(int) + sizeof(CudaParam)); hipLaunchKernelGGL(( updateBest), dim3(1), dim3(n_threads), shared, 0, pos_best, pos_best_losses, best_gpu, best_loss_gpu, this->config_->n_particle(), this->config_->dim()); #else hipLaunchKernelGGL(( updateBest), dim3(1),dim3(1), 0, 0, pos_best, pos_best_losses, best_gpu, best_loss_gpu, this->config_->n_particle(), this->config_->dim()); #endif // printf("Iter: %d", itr); IntType best_len = this->config_->dim() * sizeof(CudaParam); hipMemcpy(this->best_, best_gpu, best_len, 
hipMemcpyDeviceToHost); hipMemcpy(&this->best_loss_, best_loss_gpu, sizeof(CudaParam), hipMemcpyDeviceToHost); // printf(" After\n"); if (this->config_->d()) this->printBest(itr); } void CudaPSO::fit_() { CudaMat r_p, r_g, pos_best, pos_best_losses, tmp_p, tmp_g, best_gpu, best_loss_gpu; IntType best_len = this->config_->dim() * sizeof(CudaParam); hipMalloc(&best_gpu, best_len); hipMalloc(&best_loss_gpu, sizeof(CudaParam)); // Set an high loss then overwrite this->best_loss_ = std::numeric_limits<CudaParam>::max(); hipMemcpy(best_loss_gpu, &this->best_loss_, sizeof(CudaParam), hipMemcpyHostToDevice); // List is used to both allocate and later free the CUDA memory IntType tot_len_bytes = this->tot_len_ * sizeof(CudaParam); std::list<CudaMat*> tot_len_ptrs = {&r_p, &r_g, &pos_best, &tmp_p, &tmp_g}; #pragma omp parallel for for (auto ptr : tot_len_ptrs) hipMalloc(ptr, tot_len_bytes); // Copy the best position information hipMemcpy(pos_best, this->parts_, tot_len_bytes, hipMemcpyDeviceToDevice); // Number of blocks when operating at the particle level -- not the parameter level hipMalloc(&pos_best_losses, this->config_->n_particle() * sizeof(CudaParam)); this->calcLossAndUpdate(0, pos_best_losses, pos_best, pos_best_losses, best_gpu, best_loss_gpu); IntType n_part_blocks = (this->config_->n_particle() + M_ + 1) / M_; hipStream_t stream1, stream2; hipStreamCreate(&stream1); hipStreamCreate(&stream2); // Iteration loop for (IntType itr = 1; itr <= this->config_->n_iter(); itr++) { if (this->sep_kernel_) { this->randomize(r_p, this->tot_len_, 0., 1.); this->randomize(r_g, this->tot_len_, 0., 1.); hipLaunchKernelGGL(( vecScale), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->velos_, this->config_->momentum()); // Use particle's previous best information hipLaunchKernelGGL(( vecDiff), dim3(DEFAULT_N_),dim3(M_), 0, 0, tmp_p, pos_best, this->parts_); hipLaunchKernelGGL(( vecCwiseProd), dim3(DEFAULT_N_), dim3(M_), 0, 0, tmp_p, r_p); hipLaunchKernelGGL(( vecSaxpy), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->velos_, this->config_->rate_point(), tmp_p); // Use global best particle information hipLaunchKernelGGL(( vecBestDiff), dim3(DEFAULT_N_),dim3(M_), 0, 0, tmp_g, best_gpu, this->parts_); hipLaunchKernelGGL(( vecCwiseProd), dim3(DEFAULT_N_), dim3(M_), 0, 0, tmp_g, r_g); hipLaunchKernelGGL(( vecSaxpy), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->velos_, this->config_->rate_global(), tmp_g); hipLaunchKernelGGL(( vecSaxpy), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->parts_, this->config_->lr(), this->velos_); // Update the position and velocities including clipping CudaParam v_max = this->config_->bound_hi() - this->config_->bound_lo(); hipLaunchKernelGGL(( vecClip), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->velos_, -v_max, v_max); hipLaunchKernelGGL(( vecClip), dim3(DEFAULT_N_),dim3(M_), 0, 0, this->parts_, this->config_->bound_lo(), this->config_->bound_hi()); } else { this->randomize(r_p, this->tot_len_, 0., 1., &stream1); hipLaunchKernelGGL(( vecPoint), dim3(DEFAULT_N_),dim3(M_),0,stream1, tmp_p, pos_best, this->parts_, r_p, this->config_->rate_point()); this->randomize(r_g, this->tot_len_, 0., 1., &stream2); hipLaunchKernelGGL(( vecGlobal), dim3(DEFAULT_N_), dim3(M_),0,stream2, tmp_g, best_gpu, this->parts_, r_g, this->config_->rate_global()); hipDeviceSynchronize(); hipLaunchKernelGGL(( vecCombine), dim3(DEFAULT_N_), dim3(M_), 0, 0, this->parts_, this->velos_, tmp_p, tmp_g, this->config_->momentum(), this->config_->bound_lo(), this->config_->bound_hi(), this->config_->lr()); } // == Update the best position information 
this->calcLossAndUpdate(itr, tmp_p, pos_best, pos_best_losses, best_gpu, best_loss_gpu); } // ===== Cleanup for (auto ptr : tot_len_ptrs) hipFree(*ptr); hipFree(best_gpu); hipFree(best_loss_gpu); hipFree(pos_best_losses); }
f16b6ea31634ad90d8a51e6c1e5133f6e682555e.cu
// // Created by Zayd Hammoudeh on 10/26/20. // #include <cassert> #include <cfloat> #include <curand_kernel.h> #include <list> #include <omp.h> #include "base_pso.h" #include "cuda_config.cuh" #include "cuda_pso.cuh" #include "types_general.h" #define PARALLEL_MAX_REDUCE __global__ void initRand(curandState * state) { IntType idx = blockIdx.x * blockDim.x + threadIdx.x; curand_init(clock64(), idx, 0, &state[idx]); } __global__ void vecAdd(CudaMat v, CudaParam scalar) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; v[id] += scalar; } __global__ void vecScale(CudaMat v, CudaParam scalar) { // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds v[id] *= scalar; } __global__ void vecCwiseProd(CudaMat dest, const CudaMat other) { // NOLINT(readability-non-const-parameter,misc-misplaced-const) // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds dest[id] *= other[id]; } __global__ void vecDiff(CudaMat dest, const CudaMat left, const CudaMat right) { // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds dest[id] = left[id] - right[id]; } __global__ void vecClip(CudaMat v, CudaParam bound_lo, CudaParam bound_hi) { // Get our global thread ID IntType id = blockIdx.x * blockDim.x + threadIdx.x; v[id] = fmax(bound_lo, fmin(v[id], bound_hi)); } /** y = aX + y */ __global__ void vecSaxpy(CudaMat y, CudaParam a, const CudaMat x) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; y[id] += a * x[id]; } __global__ void vecRand(curandState * state, CudaMat v) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; #if CudaParam == double v[id] = curand_uniform_double(state + id); #elif CudaParam == float v[id] = curand_uniform(&state[id]); #else assert(false); #endif } __global__ void vecRandFast(IntType * prngs, CudaMat v, CudaParam lo, CudaParam diff) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; prngs[id] = 1103515245 * prngs[id] + 12345; v[id] = lo + diff * (prngs[id] & 0x3FFFFFFF) / 0x3FFFFFFF; } /** * Uses the best vector position and calculates the relative position for use in calculating * the velocity. 
* @param dest Location to store the position * @param best Best overall position so far * @param parts_ Vector of all particle locations * @param n_part Number of particles */ __global__ void vecBestDiff(CudaMat dest, CudaMat best, CudaMat parts_) { IntType idx = blockIdx.x * blockDim.x + threadIdx.x; dest[idx] = best[threadIdx.x] - parts_[idx]; } __global__ void vecCombine(CudaMat parts, CudaMat velos, CudaMat const tmp_p, CudaMat const tmp_g, const CudaParam momentum, const CudaParam bound_lo, const CudaParam bound_hi, const CudaParam lr) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; CudaParam diff = bound_hi - bound_lo; velos[id] = momentum * velos[id] + tmp_p[id] + tmp_g[id]; parts[id] += lr * velos[id]; velos[id] = fmax(-diff, fmin(velos[id], diff)); parts[id] = fmax(bound_lo, fmin(parts[id], bound_hi)); } __global__ void vecPoint(CudaMat tmp, CudaMat const pos_best, CudaMat const parts, CudaMat const r_p, const CudaParam rate_point) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; tmp[id] = rate_point * r_p[id] * (pos_best[id] - parts[id]); } __global__ void vecGlobal(CudaMat tmp, CudaMat const best_gpu, CudaMat const parts, CudaMat const r_g, const CudaParam rate_global) { IntType id = blockIdx.x * blockDim.x + threadIdx.x; tmp[id] = rate_global * r_g[id] * (best_gpu[threadIdx.x] - parts[id]); } __global__ void updatePosBest(CudaMat parts, CudaMat parts_loss, CudaMat pos_best, // NOLINT(misc-misplaced-const,readability-non-const-parameter) CudaMat pos_best_loss) { IntType idx = blockIdx.x; // Update the best position for the part if (pos_best_loss[idx] > parts_loss[idx]) { IntType tid = threadIdx.x; IntType offset = idx * blockDim.x + tid; pos_best[offset] = parts[offset]; __syncthreads(); if (tid == 0) pos_best_loss[idx] = parts_loss[idx]; } } #ifdef PARALLEL_MAX_REDUCE __global__ void updateBest(CudaMat pos_best, CudaMat pos_best_loss, CudaMat best, CudaMat best_loss, IntType n_part, IntType dim) { unsigned int n_threads = blockDim.x; unsigned int tid = threadIdx.x; extern __shared__ float shared[]; CudaParam * all_loss = shared; int * best_idx = (int *)&shared[n_threads]; CudaParam t_loss = *best_loss; IntType b_idx = -1; for (IntType idx = tid; idx < n_part; idx += n_threads) { if (t_loss > pos_best_loss[idx]) { t_loss = pos_best_loss[idx]; b_idx = idx; } } all_loss[tid] = t_loss; best_idx[tid] = b_idx; for (unsigned int s = n_threads / 2; s > 0; s>>=1) { __syncthreads(); if (tid < s) { if (all_loss[tid] > all_loss[tid + s]) { all_loss[tid] = all_loss[tid + s]; best_idx[tid] = best_idx[tid + s]; } } } // Copy the best overall result only once if (tid == 0 && best_idx[0] >= 0) { IntType offset = best_idx[0] * dim; *best_loss = all_loss[0]; for (IntType j = 0; j < dim; j++) best[j] = pos_best[j+offset]; } } #else __global__ void updateBest(CudaMat pos_best, CudaMat pos_best_loss, CudaMat best, CudaMat best_loss, IntType n_part, IntType dim) { IntType best_idx; bool best_found = false; // Used to queue the best result for (IntType idx = 0; idx < n_part; idx++) { // Update the best overall position if (best_loss[0] > pos_best_loss[idx]) { best_loss[0] = pos_best_loss[idx]; best_idx = idx; best_found = true; } } // Copy the best overall result only once if (best_found) { IntType offset = best_idx * dim; for (IntType j = 0; j < dim; j++) best[j] = pos_best[j+offset]; } } #endif void CudaPSO::seedRngs() { if (this->fast_rand_) { std::seed_seq seq{time(nullptr)}; std::vector<std::uint32_t> seeds(this->tot_len_); seq.generate(seeds.begin(), seeds.end()); // Place the random 
seeds on the device IntType n_bytes = this->tot_len_ * sizeof(IntType); cudaMalloc(&this->fast_rng_, n_bytes); cudaMemcpy(this->fast_rng_, seeds.data(), n_bytes, cudaMemcpyHostToDevice); } else { cudaMalloc(&this->rng_, this->tot_len_ * sizeof(curandState_t)); initRand<<<DEFAULT_N_,M_>>>(this->rng_); } } void CudaPSO::randomize(CudaMat vec, IntType n, CudaParam bound_lo, CudaParam bound_hi, cudaStream_t * stream = nullptr) { CudaParam diff = bound_hi - bound_lo; if (this->fast_rand_) { if (!stream) { vecRandFast<<<DEFAULT_N_, M_>>>(this->fast_rng_, vec, bound_lo, diff); } else { vecRandFast<<<DEFAULT_N_,M_,0,*stream>>>(this->fast_rng_, vec, bound_lo, diff); } } else { if (!stream) { vecRand<<<DEFAULT_N_, M_>>>(this->rng_, vec); if (diff != 1.) vecScale<<<DEFAULT_N_, M_>>>(vec, diff); if (bound_lo != 0.) vecAdd<<<DEFAULT_N_, M_>>>(vec, bound_lo); } else { vecRand<<<DEFAULT_N_,M_,0,*stream>>>(this->rng_, vec); if (diff != 1.) vecScale<<<DEFAULT_N_,M_,0,*stream>>>(vec, diff); if (bound_lo != 0.) vecAdd<<<DEFAULT_N_,M_,0,*stream>>>(vec, bound_lo); } } } CudaPSO::CudaPSO(CudaConfig *config, const bool sep_kernel, const bool fast_rand) : BasePSO<CudaParam, CudaLoss>(config), tot_len_(config->n_particle() * config->dim()), M_(config->dim()), DEFAULT_N_(config->n_particle()), sep_kernel_(sep_kernel), fast_rand_(fast_rand) { assert(M_ > 0); assert(DEFAULT_N_ > 0); // Allocate the random number generator memory this->seedRngs(); this->best_ = new CudaParam[this->config_->dim()]; IntType tot_len_bytes = this->tot_len_ * (sizeof(int) + sizeof(CudaParam)); // Particle and velocity information cudaMalloc(&this->parts_, tot_len_bytes); this->randomize(this->parts_, this->tot_len_, this->config_->bound_lo(), this->config_->bound_hi()); cudaMalloc(&this->velos_, tot_len_bytes); CudaParam v_max = this->config_->bound_hi() - this->config_->bound_lo(); this->randomize(this->velos_, this->tot_len_, -v_max, v_max); } void CudaPSO::calcLossAndUpdate(IntType itr, CudaMat tmp_scratch, CudaMat pos_best, CudaMat pos_best_losses, CudaMat best_gpu, CudaMat best_loss_gpu) { CudaLoss loss = this->config_->loss_func(); const IntType bl_tr = 32; const IntType n_part_blocks = (this->config_->n_particle() + bl_tr + 1) / bl_tr; loss<<<n_part_blocks, bl_tr>>>(tmp_scratch, this->parts_, this->config_->n_particle(), this->config_->dim(), this->config_->n_ele(), this->config_->ext_data(), this->config_->ext_labels()); updatePosBest<<<DEFAULT_N_, M_>>>(this->parts_, tmp_scratch, pos_best, pos_best_losses); #ifdef PARALLEL_MAX_REDUCE int n_threads = std::min(64, (int)log2((float)this->config_->n_particle())); unsigned int shared = n_threads * (sizeof(int) + sizeof(CudaParam)); updateBest<<<1, n_threads, shared>>>(pos_best, pos_best_losses, best_gpu, best_loss_gpu, this->config_->n_particle(), this->config_->dim()); #else updateBest<<<1,1>>>(pos_best, pos_best_losses, best_gpu, best_loss_gpu, this->config_->n_particle(), this->config_->dim()); #endif // printf("Iter: %d", itr); IntType best_len = this->config_->dim() * sizeof(CudaParam); cudaMemcpy(this->best_, best_gpu, best_len, cudaMemcpyDeviceToHost); cudaMemcpy(&this->best_loss_, best_loss_gpu, sizeof(CudaParam), cudaMemcpyDeviceToHost); // printf(" After\n"); if (this->config_->d()) this->printBest(itr); } void CudaPSO::fit_() { CudaMat r_p, r_g, pos_best, pos_best_losses, tmp_p, tmp_g, best_gpu, best_loss_gpu; IntType best_len = this->config_->dim() * sizeof(CudaParam); cudaMalloc(&best_gpu, best_len); cudaMalloc(&best_loss_gpu, sizeof(CudaParam)); // Set an high loss then 
overwrite this->best_loss_ = std::numeric_limits<CudaParam>::max(); cudaMemcpy(best_loss_gpu, &this->best_loss_, sizeof(CudaParam), cudaMemcpyHostToDevice); // List is used to both allocate and later free the CUDA memory IntType tot_len_bytes = this->tot_len_ * sizeof(CudaParam); std::list<CudaMat*> tot_len_ptrs = {&r_p, &r_g, &pos_best, &tmp_p, &tmp_g}; #pragma omp parallel for for (auto ptr : tot_len_ptrs) cudaMalloc(ptr, tot_len_bytes); // Copy the best position information cudaMemcpy(pos_best, this->parts_, tot_len_bytes, cudaMemcpyDeviceToDevice); // Number of blocks when operating at the particle level -- not the parameter level cudaMalloc(&pos_best_losses, this->config_->n_particle() * sizeof(CudaParam)); this->calcLossAndUpdate(0, pos_best_losses, pos_best, pos_best_losses, best_gpu, best_loss_gpu); IntType n_part_blocks = (this->config_->n_particle() + M_ + 1) / M_; cudaStream_t stream1, stream2; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); // Iteration loop for (IntType itr = 1; itr <= this->config_->n_iter(); itr++) { if (this->sep_kernel_) { this->randomize(r_p, this->tot_len_, 0., 1.); this->randomize(r_g, this->tot_len_, 0., 1.); vecScale<<<DEFAULT_N_,M_>>>(this->velos_, this->config_->momentum()); // Use particle's previous best information vecDiff<<<DEFAULT_N_,M_>>>(tmp_p, pos_best, this->parts_); vecCwiseProd<<<DEFAULT_N_, M_>>>(tmp_p, r_p); vecSaxpy<<<DEFAULT_N_,M_>>>(this->velos_, this->config_->rate_point(), tmp_p); // Use global best particle information vecBestDiff<<<DEFAULT_N_,M_>>>(tmp_g, best_gpu, this->parts_); vecCwiseProd<<<DEFAULT_N_, M_>>>(tmp_g, r_g); vecSaxpy<<<DEFAULT_N_,M_>>>(this->velos_, this->config_->rate_global(), tmp_g); vecSaxpy<<<DEFAULT_N_,M_>>>(this->parts_, this->config_->lr(), this->velos_); // Update the position and velocities including clipping CudaParam v_max = this->config_->bound_hi() - this->config_->bound_lo(); vecClip<<<DEFAULT_N_,M_>>>(this->velos_, -v_max, v_max); vecClip<<<DEFAULT_N_,M_>>>(this->parts_, this->config_->bound_lo(), this->config_->bound_hi()); } else { this->randomize(r_p, this->tot_len_, 0., 1., &stream1); vecPoint<<<DEFAULT_N_,M_,0,stream1>>>(tmp_p, pos_best, this->parts_, r_p, this->config_->rate_point()); this->randomize(r_g, this->tot_len_, 0., 1., &stream2); vecGlobal<<<DEFAULT_N_, M_,0,stream2>>>(tmp_g, best_gpu, this->parts_, r_g, this->config_->rate_global()); cudaDeviceSynchronize(); vecCombine<<<DEFAULT_N_, M_>>>(this->parts_, this->velos_, tmp_p, tmp_g, this->config_->momentum(), this->config_->bound_lo(), this->config_->bound_hi(), this->config_->lr()); } // == Update the best position information this->calcLossAndUpdate(itr, tmp_p, pos_best, pos_best_losses, best_gpu, best_loss_gpu); } // ===== Cleanup for (auto ptr : tot_len_ptrs) cudaFree(*ptr); cudaFree(best_gpu); cudaFree(best_loss_gpu); cudaFree(pos_best_losses); }
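The vecCombine kernel above fuses the standard particle-swarm update rule: v <- momentum*v + rate_point*r_p*(pbest - x) + rate_global*r_g*(gbest - x), then x <- x + lr*v, with both vectors clipped to the search bounds. A minimal standalone sketch of that rule (hypothetical names, not part of the file pair above; assumes one thread per parameter, pre-drawn uniform randoms in r_p/r_g, and a global-best vector already broadcast to the same length as x rather than indexed per dimension via threadIdx.x as in vecCombine/vecGlobal):

// Hypothetical fused PSO update; one thread per particle parameter.
__global__ void psoUpdateSketch(float *x, float *v,
                                const float *pbest, const float *gbest_bcast,
                                const float *r_p, const float *r_g,
                                float momentum, float c_p, float c_g, float lr,
                                float lo, float hi, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float vmax = hi - lo;
    float vel = momentum * v[i]
              + c_p * r_p[i] * (pbest[i] - x[i])
              + c_g * r_g[i] * (gbest_bcast[i] - x[i]);
    vel = fmaxf(-vmax, fminf(vel, vmax));   // clip velocity to +/- (hi - lo)
    float pos = x[i] + lr * vel;
    x[i] = fmaxf(lo, fminf(pos, hi));       // clip position to the search bounds
    v[i] = vel;
}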
a536406be39f0ad6c47cecad6b2495eeccfe319e.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdio> #include <cstring> #include <string> #include <algorithm> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> // #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> using namespace std; typedef double ld; typedef long long LL; namespace io_impl { inline bool maybe_digit(char c) { return c >= '0' && c <= '9'; } struct io_s { private: FILE *fin; FILE *fout; bool negative; bool ok; char ch; inline char next_char() { static char buf[100000], *p1 = buf, *p2 = buf; return p1 == p2 && (p2 = (p1 = buf) + fread(buf, 1, 100000, fin), p1 == p2) ? EOF : *p1++; } public: void init(FILE *_in, FILE *_out) { fin = _in; fout = _out; ch = next_char(); ok = true; } template <typename T> bool run(T &_v) { _v = 0; while (!maybe_digit(ch) && ch != EOF) ch = next_char(); if (ch == EOF) return ok = false; do { _v = (_v << 1) + (_v << 3) + ch - '0'; } while (maybe_digit(ch = next_char())); return true; } template <typename T> bool rd(T &_v) { negative = false; _v = 0; while (!maybe_digit(ch) && ch != EOF) { negative = ch == '-'; ch = next_char(); } if (ch == EOF) return ok = false; do { _v = (_v * 10) + (ch - '0'); } while (maybe_digit(ch = next_char())); static double _map[] = {1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6}; if (ch == '.') { int tp = 0; while (maybe_digit(ch = next_char())) { _v = (_v * 10) + (ch - '0'); ++tp; } _v *= _map[tp]; } if (negative) _v = -_v; return true; } }; } // namespace io_impl using namespace io_impl; io_s iokb; namespace output { const int OutputBufferSize = 1 << 20; char buffer[OutputBufferSize]; char *s = buffer; inline void flush() { fwrite(buffer, 1, s-buffer, stdout); s = buffer; fflush(stdout); } inline void print(const char ch) { // putchar(ch); return; if (s-buffer>OutputBufferSize-2) flush(); *s++ = ch; } inline void print(char *str) { while (*str!=0) print(char(*str++)); } inline void print(int x) { // printf("%d", x); return; char buf[25] = {0}, *p = buf; // if (x<0) print('-'), x=-x; // if (x == 0) print('0'); while (x) *(++p) = x%10, x/=10; while (p != buf) print(char(*(p--)+'0')); } inline void print(LL x) { // printf("%d", x); return; char buf[25] = {0}, *p = buf; if (x<0) print('-'), x=-x; if (x == 0) print('0'); while (x) *(++p) = x%10, x/=10; while (p != buf) print(char(*(p--)+'0')); } inline void print(ld v) { // printf("%.2f", x); // static int stk[70], tp; // tp = 0; if (fabs(v) < 0.005) { print('0'); return; } else { LL x = (LL)floor(v * 100 + 0.5); // cerr << "x=" << x << endl; print((LL)(x / 100)); print('.'); print((char)(x / 10 % 10 + '0')); print((char)(x % 10 + '0')); } } } struct ios { inline ios & operator >> (int &x){ iokb.run(x); return *this; } inline ios &operator>>(ld &x) { iokb.rd(x); return *this; } } io; // ====================================================== // const int max_shared_size = 6144; inline void handleCudaError(hipError_t err, string name = "fuck") { if (err != hipSuccess) { cerr << name << endl; cerr << hipGetErrorString(err) << endl; exit(0); } } ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c; int an, am, bn, bm; int n, m; void copyMatrix(ld *&src, ld *&dst, int n, int m) { int size = sizeof(ld) * n * m; handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix"); handleCudaError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), "memcpy in copyMatrix"); } // template<typename T> // __global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) { // int 
index = blockDim.x * blockIdx.x + threadIdx.x; // int i = index / bm, j = index % bm; // if (i >= an || j >= bm) return; // ld sum = 0; // if (i < an && j < bm) { // for (int k=0; k<am; ++k) // sum += d_a[i * am + k] * d_b[k * bm + j]; // } // if (i * bm + j < an * bm) // d_c[i * bm + j] = sum; // // int index = threadIdx.x; // // if (index < an * bm) // // d_c[index] = 1; // } __global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am, int workload, int addi) { // __shared__ ld c_a[max_shared_size]; int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= an * bm) return; int st = min(index, addi) * (workload+1) + max(0, index - addi) * workload, ed = st + workload + (index < addi ? 1 : 0), base = st / bm * bm; if (st % bm == 0) base -= bm; // int shareda = min(am, max_shared_size); // shareda = 2; // shareda = 0; for (int p=st; p<ed; ++p) { // if (p % bm == 0) { // base += bm; // for (int j=0; j<shareda; ++j) { // c_a[j] = d_a[base + j]; // } // __syncthreads(); // } if (p % bm == 0) base += bm; int i = p / bm, j = p % bm; ld sum = 0; // for (int k=0; k<shareda; ++k) { // sum += c_a[k] * d_b[j * bm + k]; // } // for (int k=shareda; k<am; ++k) { for (int k=0; k<am; ++k) { sum += d_a[i * am + k] * d_b[k * bm + j]; } d_c[i * bm + j] = sum; } } void outputMatrix(ld *a, int n, int m) { // output::print(n); output::print(','); // output::print(m); output::print('\n'); for (int i=0; i<n; ++i) { int base = i * m; output::print(a[base]); for (int j=1; j<m; ++j) { output::print(','); output::print(a[base + j]); } output::print('\n'); } } int main() { // #ifndef Weaverzhu // freopen("input.txt", "r", stdin); freopen("output.txt", "w", stdout); iokb.init(fopen("input.txt", "r"), fopen("output.txt", "w")); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); // cerr << prop.multiProcessorCount << endl; io >> an >> am; h_a = (ld*)malloc(sizeof(ld) * an * am); for (int i=0; i<an; ++i) for (int j=0; j<am; ++j) io >> h_a[i*am + j]; io >> bn >> bm; h_b = (ld*)malloc(sizeof(ld) * bn * bm); for (int i=0; i<bn; ++i) for (int j=0; j<bm; ++j) io >> h_b[i*bm + j]; // B.readtrans(); // outputMatrix(h_a, an, am); // outputMatrix(h_b, bn, bm); n = an; m = bm; // int block_size = prop.maxThreadsPerBlock, grids = (n * m + block_size - 1) / block_size; int block_size = bm; int numBlocks; // hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, matrixMult2, block_size, 0); // double activeWarps = numBlocks * block_size / prop.warpSize, // maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; // cerr << "occupancy = " << activeWarps / maxWarps * 100 << "% " << endl; // cerr << "numBlocks = " << numBlocks << "threads = "<< numBlocks * block_size <<endl; // exit(0); // int grids = numBlocks; // copyMatrix(h_a, d_a, an, am); // copyMatrix(h_b, d_b, bn, bm); // handleCudaError(hipMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c"); // int threads = grids * block_size; // int tot = an * bm; // int workload = (tot) / threads, size996 = tot % threads; // // fprintf(stderr, "stderr: threads=%d, tot=%d, workload=%d, addi=%d\n", threads, tot, workload, size996); // // exit(0); // // matrixMult<<<grids, block_size>>>(d_a, d_b, d_c, an, bm, am); // matrixMult2<<<grids, block_size>>>(d_a, d_b, d_c, an, bm, am, workload, size996); // h_c = (ld*)malloc(sizeof(ld) * n * m); // int size = sizeof(ld) * n * m; // handleCudaError(hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost), "memcpy back"); // outputMatrix(h_c, n, m); // output::flush(); return 0; }
a536406be39f0ad6c47cecad6b2495eeccfe319e.cu
#include <cmath> #include <cstdio> #include <cstring> #include <string> #include <algorithm> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> // #include <device_functions.h> #include <cuda_runtime_api.h> using namespace std; typedef double ld; typedef long long LL; namespace io_impl { inline bool maybe_digit(char c) { return c >= '0' && c <= '9'; } struct io_s { private: FILE *fin; FILE *fout; bool negative; bool ok; char ch; inline char next_char() { static char buf[100000], *p1 = buf, *p2 = buf; return p1 == p2 && (p2 = (p1 = buf) + fread(buf, 1, 100000, fin), p1 == p2) ? EOF : *p1++; } public: void init(FILE *_in, FILE *_out) { fin = _in; fout = _out; ch = next_char(); ok = true; } template <typename T> bool run(T &_v) { _v = 0; while (!maybe_digit(ch) && ch != EOF) ch = next_char(); if (ch == EOF) return ok = false; do { _v = (_v << 1) + (_v << 3) + ch - '0'; } while (maybe_digit(ch = next_char())); return true; } template <typename T> bool rd(T &_v) { negative = false; _v = 0; while (!maybe_digit(ch) && ch != EOF) { negative = ch == '-'; ch = next_char(); } if (ch == EOF) return ok = false; do { _v = (_v * 10) + (ch - '0'); } while (maybe_digit(ch = next_char())); static double _map[] = {1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6}; if (ch == '.') { int tp = 0; while (maybe_digit(ch = next_char())) { _v = (_v * 10) + (ch - '0'); ++tp; } _v *= _map[tp]; } if (negative) _v = -_v; return true; } }; } // namespace io_impl using namespace io_impl; io_s iokb; namespace output { const int OutputBufferSize = 1 << 20; char buffer[OutputBufferSize]; char *s = buffer; inline void flush() { fwrite(buffer, 1, s-buffer, stdout); s = buffer; fflush(stdout); } inline void print(const char ch) { // putchar(ch); return; if (s-buffer>OutputBufferSize-2) flush(); *s++ = ch; } inline void print(char *str) { while (*str!=0) print(char(*str++)); } inline void print(int x) { // printf("%d", x); return; char buf[25] = {0}, *p = buf; // if (x<0) print('-'), x=-x; // if (x == 0) print('0'); while (x) *(++p) = x%10, x/=10; while (p != buf) print(char(*(p--)+'0')); } inline void print(LL x) { // printf("%d", x); return; char buf[25] = {0}, *p = buf; if (x<0) print('-'), x=-x; if (x == 0) print('0'); while (x) *(++p) = x%10, x/=10; while (p != buf) print(char(*(p--)+'0')); } inline void print(ld v) { // printf("%.2f", x); // static int stk[70], tp; // tp = 0; if (fabs(v) < 0.005) { print('0'); return; } else { LL x = (LL)floor(v * 100 + 0.5); // cerr << "x=" << x << endl; print((LL)(x / 100)); print('.'); print((char)(x / 10 % 10 + '0')); print((char)(x % 10 + '0')); } } } struct ios { inline ios & operator >> (int &x){ iokb.run(x); return *this; } inline ios &operator>>(ld &x) { iokb.rd(x); return *this; } } io; // ====================================================== // const int max_shared_size = 6144; inline void handleCudaError(cudaError_t err, string name = "fuck") { if (err != cudaSuccess) { cerr << name << endl; cerr << cudaGetErrorString(err) << endl; exit(0); } } ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c; int an, am, bn, bm; int n, m; void copyMatrix(ld *&src, ld *&dst, int n, int m) { int size = sizeof(ld) * n * m; handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix"); handleCudaError(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice), "memcpy in copyMatrix"); } // template<typename T> // __global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) { // int index = blockDim.x * blockIdx.x + threadIdx.x; // int i = index / bm, j = 
index % bm; // if (i >= an || j >= bm) return; // ld sum = 0; // if (i < an && j < bm) { // for (int k=0; k<am; ++k) // sum += d_a[i * am + k] * d_b[k * bm + j]; // } // if (i * bm + j < an * bm) // d_c[i * bm + j] = sum; // // int index = threadIdx.x; // // if (index < an * bm) // // d_c[index] = 1; // } __global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bm, int am, int workload, int addi) { // __shared__ ld c_a[max_shared_size]; int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= an * bm) return; int st = min(index, addi) * (workload+1) + max(0, index - addi) * workload, ed = st + workload + (index < addi ? 1 : 0), base = st / bm * bm; if (st % bm == 0) base -= bm; // int shareda = min(am, max_shared_size); // shareda = 2; // shareda = 0; for (int p=st; p<ed; ++p) { // if (p % bm == 0) { // base += bm; // for (int j=0; j<shareda; ++j) { // c_a[j] = d_a[base + j]; // } // __syncthreads(); // } if (p % bm == 0) base += bm; int i = p / bm, j = p % bm; ld sum = 0; // for (int k=0; k<shareda; ++k) { // sum += c_a[k] * d_b[j * bm + k]; // } // for (int k=shareda; k<am; ++k) { for (int k=0; k<am; ++k) { sum += d_a[i * am + k] * d_b[k * bm + j]; } d_c[i * bm + j] = sum; } } void outputMatrix(ld *a, int n, int m) { // output::print(n); output::print(','); // output::print(m); output::print('\n'); for (int i=0; i<n; ++i) { int base = i * m; output::print(a[base]); for (int j=1; j<m; ++j) { output::print(','); output::print(a[base + j]); } output::print('\n'); } } int main() { // #ifndef Weaverzhu // freopen("input.txt", "r", stdin); freopen("output.txt", "w", stdout); iokb.init(fopen("input.txt", "r"), fopen("output.txt", "w")); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); // cerr << prop.multiProcessorCount << endl; io >> an >> am; h_a = (ld*)malloc(sizeof(ld) * an * am); for (int i=0; i<an; ++i) for (int j=0; j<am; ++j) io >> h_a[i*am + j]; io >> bn >> bm; h_b = (ld*)malloc(sizeof(ld) * bn * bm); for (int i=0; i<bn; ++i) for (int j=0; j<bm; ++j) io >> h_b[i*bm + j]; // B.readtrans(); // outputMatrix(h_a, an, am); // outputMatrix(h_b, bn, bm); n = an; m = bm; // int block_size = prop.maxThreadsPerBlock, grids = (n * m + block_size - 1) / block_size; int block_size = bm; int numBlocks; // cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, matrixMult2, block_size, 0); // double activeWarps = numBlocks * block_size / prop.warpSize, // maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; // cerr << "occupancy = " << activeWarps / maxWarps * 100 << "% " << endl; // cerr << "numBlocks = " << numBlocks << "threads = "<< numBlocks * block_size <<endl; // exit(0); // int grids = numBlocks; // copyMatrix(h_a, d_a, an, am); // copyMatrix(h_b, d_b, bn, bm); // handleCudaError(cudaMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c"); // int threads = grids * block_size; // int tot = an * bm; // int workload = (tot) / threads, size996 = tot % threads; // // fprintf(stderr, "stderr: threads=%d, tot=%d, workload=%d, addi=%d\n", threads, tot, workload, size996); // // exit(0); // // matrixMult<<<grids, block_size>>>(d_a, d_b, d_c, an, bm, am); // matrixMult2<<<grids, block_size>>>(d_a, d_b, d_c, an, bm, am, workload, size996); // h_c = (ld*)malloc(sizeof(ld) * n * m); // int size = sizeof(ld) * n * m; // handleCudaError(cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost), "memcpy back"); // outputMatrix(h_c, n, m); // output::flush(); return 0; }
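The launch path in main() above is almost entirely commented out, and matrixMult2 assigns a fixed workload of output elements per thread. For reference, the simpler one-thread-per-output-element formulation can be written as a plain 2-D kernel. A minimal sketch (hypothetical names, not part of the file pair above; assumes row-major double matrices and a 16x16 block, not the workload splitting used by matrixMult2):

// Hypothetical naive dense multiply: C (n x m) = A (n x k) * B (k x m), row-major.
__global__ void matmulNaive(const double *A, const double *B, double *C,
                            int n, int m, int k) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= m) return;
    double sum = 0.0;
    for (int p = 0; p < k; ++p)
        sum += A[row * k + p] * B[p * m + col];
    C[row * m + col] = sum;
}

// Example launch for the buffers allocated above (d_a, d_b, d_c):
//   dim3 block(16, 16);
//   dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);
//   matmulNaive<<<grid, block>>>(d_a, d_b, d_c, n, m, k);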
55e794eaed383ee0da00ab4cfd42e21beb0378dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define BLOCK_SIZE 512 // Maximum number of elements that can be inserted into a block queue #define BQ_CAPACITY 2048 #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) // Global queuing stub __global__ void gpu_global_queuing_kernel (int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, const unsigned int numCurrLevelNodes, int *numNextLevelNodes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numCurrLevelNodes) return; int node = currLevelNodes[idx]; // Loop over all neighbors of the node for(int nbrIdx = nodePtrs[node]; nbrIdx <nodePtrs[node + 1]; nbrIdx++){ int neighbor = nodeNeighbors[nbrIdx]; if (nodeVisited[neighbor]==0){ // Mark it and add it to the queue nodeVisited[neighbor] = 1; int nextId = atomicAdd(numNextLevelNodes,1); nextLevelNodes[nextId] = neighbor; } } } // Block queuing stub __global__ void gpu_block_queuing_kernel( int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, const unsigned int numCurrLevelNodes, int *numNextLevelNodes) { __shared__ int nextLevelNodes_private[BQ_CAPACITY]; __shared__ int numNextLevelNodes_private; if (threadIdx.x == 0) numNextLevelNodes_private = 0; __syncthreads(); int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numCurrLevelNodes) return; int node = currLevelNodes[idx]; // Loop over all neighbors of the node for(int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; nbrIdx++){ int neighbor = nodeNeighbors[nbrIdx]; if (nodeVisited[neighbor] == 0){ // Mark it and add it to the queue nodeVisited[neighbor] = 1; int nextId = atomicAdd(&numNextLevelNodes_private,1); nextLevelNodes_private[nextId] = neighbor; } } __syncthreads(); if (threadIdx.x==0) { int nextId = atomicAdd(numNextLevelNodes,numNextLevelNodes_private); for( int i = 0; i < numNextLevelNodes_private; i++){ nextLevelNodes[nextId+i] = nextLevelNodes_private[i]; } } } // Host function for global queuing invocation void gpu_global_queuing(int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, unsigned int numCurrLevelNodes, int *numNextLevelNodes) { const unsigned int numBlocks = 45; hipLaunchKernelGGL(( gpu_global_queuing_kernel), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } // Host function for block queuing invocation void gpu_block_queuing(int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, unsigned int numCurrLevelNodes, int *numNextLevelNodes) { const unsigned int numBlocks = 45; hipLaunchKernelGGL(( gpu_block_queuing_kernel), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } int main(int argc, char *argv[]) { // Variables int numNodes; int *nodePtrs_h; int *nodeNeighbors_h; int *nodeVisited_h; int numTotalNeighbors_h; int *currLevelNodes_h; int *nextLevelNodes_h; int numCurrLevelNodes; int numNextLevelNodes_h; int *nodePtrs_d; int *nodeNeighbors_d; int *nodeVisited_d; int *currLevelNodes_d; int *nextLevelNodes_d; int *numNextLevelNodes_d; enum Mode { GPU_GLOBAL_QUEUE = 2, GPU_BLOCK_QUEUE }; wbArg_t args = wbArg_read(argc, argv); Mode mode = 
(Mode)wbImport_flag(wbArg_getInputFile(args, 0)); nodePtrs_h = (int *)wbImport(wbArg_getInputFile(args, 1), &numNodes, "Integer"); nodeNeighbors_h = (int *)wbImport(wbArg_getInputFile(args, 2), &numTotalNeighbors_h, "Integer"); nodeVisited_h = (int *)wbImport(wbArg_getInputFile(args, 3), &numNodes, "Integer"); currLevelNodes_h = (int *)wbImport(wbArg_getInputFile(args, 4), &numCurrLevelNodes, "Integer"); // (do not modify) Datasets should be consistent if (nodePtrs_h[numNodes] != numTotalNeighbors_h) { wbLog(ERROR, "Datasets are inconsistent! Please report this."); } // (do not modify) Prepare next level containers (i.e. output variables) numNextLevelNodes_h = 0; nextLevelNodes_h = (int *)malloc((numNodes) * sizeof(int)); wbLog(TRACE, "# Mode = ", mode); wbLog(TRACE, "# Nodes = ", numNodes); wbLog(TRACE, "# Total Neighbors = ", numTotalNeighbors_h); wbLog(TRACE, "# Current Level Nodes = ", numCurrLevelNodes); // (do not modify) Allocate device variables -------------------------- wbLog(TRACE, "Allocating device variables..."); wbCheck(hipMalloc((void **)&nodePtrs_d, (numNodes + 1) * sizeof(int))); wbCheck(hipMalloc((void **)&nodeVisited_d, numNodes * sizeof(int))); wbCheck(hipMalloc((void **)&nodeNeighbors_d, nodePtrs_h[numNodes] * sizeof(int))); wbCheck(hipMalloc((void **)&currLevelNodes_d, numCurrLevelNodes * sizeof(int))); wbCheck(hipMalloc((void **)&numNextLevelNodes_d, sizeof(int))); wbCheck( hipMalloc((void **)&nextLevelNodes_d, (numNodes) * sizeof(int))); wbCheck(hipDeviceSynchronize()); // (do not modify) Copy host variables to device -------------------- wbLog(TRACE, "Copying data from host to device..."); wbCheck(hipMemcpy(nodePtrs_d, nodePtrs_h, (numNodes + 1) * sizeof(int), hipMemcpyHostToDevice)); wbCheck(hipMemcpy(nodeVisited_d, nodeVisited_h, numNodes * sizeof(int), hipMemcpyHostToDevice)); wbCheck(hipMemcpy(nodeNeighbors_d, nodeNeighbors_h, nodePtrs_h[numNodes] * sizeof(int), hipMemcpyHostToDevice)); wbCheck(hipMemcpy(currLevelNodes_d, currLevelNodes_h, numCurrLevelNodes * sizeof(int), hipMemcpyHostToDevice)); wbCheck(hipMemset(numNextLevelNodes_d, 0, sizeof(int))); wbCheck(hipDeviceSynchronize()); // (do not modify) Launch kernel ---------------------------------------- wbLog(INFO, "Launching kernel "); if (mode == GPU_GLOBAL_QUEUE) { wbLog(INFO, "(GPU with global queuing)..."); gpu_global_queuing(nodePtrs_d, nodeNeighbors_d, nodeVisited_d, currLevelNodes_d, nextLevelNodes_d, numCurrLevelNodes, numNextLevelNodes_d); wbCheck(hipDeviceSynchronize()); } else if (mode == GPU_BLOCK_QUEUE) { wbLog(INFO, "(GPU with block and global queuing)..."); gpu_block_queuing(nodePtrs_d, nodeNeighbors_d, nodeVisited_d, currLevelNodes_d, nextLevelNodes_d, numCurrLevelNodes, numNextLevelNodes_d); wbCheck(hipDeviceSynchronize()); } else { wbLog(ERROR, "Invalid mode!\n"); exit(0); } // (do not modify) Copy device variables from host ---------------------- wbLog(INFO, "Copying data from device to host..."); wbCheck(hipMemcpy(&numNextLevelNodes_h, numNextLevelNodes_d, sizeof(int), hipMemcpyDeviceToHost)); wbCheck(hipMemcpy(nextLevelNodes_h, nextLevelNodes_d, numNodes * sizeof(int), hipMemcpyDeviceToHost)); wbCheck(hipMemcpy(nodeVisited_h, nodeVisited_d, numNodes * sizeof(int), hipMemcpyDeviceToHost)); wbCheck(hipDeviceSynchronize()); // (do not modify) Verify correctness // ------------------------------------- // Only check that the visited nodes match the reference implementation wbSolution(args, nodeVisited_h, numNodes); // (do not modify) Free memory // 
------------------------------------------------------------ free(nodePtrs_h); free(nodeVisited_h); free(nodeNeighbors_h); free(currLevelNodes_h); free(nextLevelNodes_h); wbCheck(hipFree(nodePtrs_d)); wbCheck(hipFree(nodeVisited_d)); wbCheck(hipFree(nodeNeighbors_d)); wbCheck(hipFree(currLevelNodes_d)); wbCheck(hipFree(numNextLevelNodes_d)); wbCheck(hipFree(nextLevelNodes_d)); return 0; }
55e794eaed383ee0da00ab4cfd42e21beb0378dc.cu
#include <wb.h> #define BLOCK_SIZE 512 // Maximum number of elements that can be inserted into a block queue #define BQ_CAPACITY 2048 #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) // Global queuing stub __global__ void gpu_global_queuing_kernel (int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, const unsigned int numCurrLevelNodes, int *numNextLevelNodes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numCurrLevelNodes) return; int node = currLevelNodes[idx]; // Loop over all neighbors of the node for(int nbrIdx = nodePtrs[node]; nbrIdx <nodePtrs[node + 1]; nbrIdx++){ int neighbor = nodeNeighbors[nbrIdx]; if (nodeVisited[neighbor]==0){ // Mark it and add it to the queue nodeVisited[neighbor] = 1; int nextId = atomicAdd(numNextLevelNodes,1); nextLevelNodes[nextId] = neighbor; } } } // Block queuing stub __global__ void gpu_block_queuing_kernel( int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, const unsigned int numCurrLevelNodes, int *numNextLevelNodes) { __shared__ int nextLevelNodes_private[BQ_CAPACITY]; __shared__ int numNextLevelNodes_private; if (threadIdx.x == 0) numNextLevelNodes_private = 0; __syncthreads(); int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numCurrLevelNodes) return; int node = currLevelNodes[idx]; // Loop over all neighbors of the node for(int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; nbrIdx++){ int neighbor = nodeNeighbors[nbrIdx]; if (nodeVisited[neighbor] == 0){ // Mark it and add it to the queue nodeVisited[neighbor] = 1; int nextId = atomicAdd(&numNextLevelNodes_private,1); nextLevelNodes_private[nextId] = neighbor; } } __syncthreads(); if (threadIdx.x==0) { int nextId = atomicAdd(numNextLevelNodes,numNextLevelNodes_private); for( int i = 0; i < numNextLevelNodes_private; i++){ nextLevelNodes[nextId+i] = nextLevelNodes_private[i]; } } } // Host function for global queuing invocation void gpu_global_queuing(int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, unsigned int numCurrLevelNodes, int *numNextLevelNodes) { const unsigned int numBlocks = 45; gpu_global_queuing_kernel<<<numBlocks, BLOCK_SIZE>>>( nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } // Host function for block queuing invocation void gpu_block_queuing(int *nodePtrs, int *nodeNeighbors, int *nodeVisited, int *currLevelNodes, int *nextLevelNodes, unsigned int numCurrLevelNodes, int *numNextLevelNodes) { const unsigned int numBlocks = 45; gpu_block_queuing_kernel<<<numBlocks, BLOCK_SIZE>>>( nodePtrs, nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes, numCurrLevelNodes, numNextLevelNodes); } int main(int argc, char *argv[]) { // Variables int numNodes; int *nodePtrs_h; int *nodeNeighbors_h; int *nodeVisited_h; int numTotalNeighbors_h; int *currLevelNodes_h; int *nextLevelNodes_h; int numCurrLevelNodes; int numNextLevelNodes_h; int *nodePtrs_d; int *nodeNeighbors_d; int *nodeVisited_d; int *currLevelNodes_d; int *nextLevelNodes_d; int *numNextLevelNodes_d; enum Mode { GPU_GLOBAL_QUEUE = 2, GPU_BLOCK_QUEUE }; wbArg_t args = wbArg_read(argc, argv); Mode mode = (Mode)wbImport_flag(wbArg_getInputFile(args, 0)); nodePtrs_h = (int *)wbImport(wbArg_getInputFile(args, 1), &numNodes, "Integer"); nodeNeighbors_h = (int *)wbImport(wbArg_getInputFile(args, 2), 
&numTotalNeighbors_h, "Integer"); nodeVisited_h = (int *)wbImport(wbArg_getInputFile(args, 3), &numNodes, "Integer"); currLevelNodes_h = (int *)wbImport(wbArg_getInputFile(args, 4), &numCurrLevelNodes, "Integer"); // (do not modify) Datasets should be consistent if (nodePtrs_h[numNodes] != numTotalNeighbors_h) { wbLog(ERROR, "Datasets are inconsistent! Please report this."); } // (do not modify) Prepare next level containers (i.e. output variables) numNextLevelNodes_h = 0; nextLevelNodes_h = (int *)malloc((numNodes) * sizeof(int)); wbLog(TRACE, "# Mode = ", mode); wbLog(TRACE, "# Nodes = ", numNodes); wbLog(TRACE, "# Total Neighbors = ", numTotalNeighbors_h); wbLog(TRACE, "# Current Level Nodes = ", numCurrLevelNodes); // (do not modify) Allocate device variables -------------------------- wbLog(TRACE, "Allocating device variables..."); wbCheck(cudaMalloc((void **)&nodePtrs_d, (numNodes + 1) * sizeof(int))); wbCheck(cudaMalloc((void **)&nodeVisited_d, numNodes * sizeof(int))); wbCheck(cudaMalloc((void **)&nodeNeighbors_d, nodePtrs_h[numNodes] * sizeof(int))); wbCheck(cudaMalloc((void **)&currLevelNodes_d, numCurrLevelNodes * sizeof(int))); wbCheck(cudaMalloc((void **)&numNextLevelNodes_d, sizeof(int))); wbCheck( cudaMalloc((void **)&nextLevelNodes_d, (numNodes) * sizeof(int))); wbCheck(cudaDeviceSynchronize()); // (do not modify) Copy host variables to device -------------------- wbLog(TRACE, "Copying data from host to device..."); wbCheck(cudaMemcpy(nodePtrs_d, nodePtrs_h, (numNodes + 1) * sizeof(int), cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(nodeVisited_d, nodeVisited_h, numNodes * sizeof(int), cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(nodeNeighbors_d, nodeNeighbors_h, nodePtrs_h[numNodes] * sizeof(int), cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(currLevelNodes_d, currLevelNodes_h, numCurrLevelNodes * sizeof(int), cudaMemcpyHostToDevice)); wbCheck(cudaMemset(numNextLevelNodes_d, 0, sizeof(int))); wbCheck(cudaDeviceSynchronize()); // (do not modify) Launch kernel ---------------------------------------- wbLog(INFO, "Launching kernel "); if (mode == GPU_GLOBAL_QUEUE) { wbLog(INFO, "(GPU with global queuing)..."); gpu_global_queuing(nodePtrs_d, nodeNeighbors_d, nodeVisited_d, currLevelNodes_d, nextLevelNodes_d, numCurrLevelNodes, numNextLevelNodes_d); wbCheck(cudaDeviceSynchronize()); } else if (mode == GPU_BLOCK_QUEUE) { wbLog(INFO, "(GPU with block and global queuing)..."); gpu_block_queuing(nodePtrs_d, nodeNeighbors_d, nodeVisited_d, currLevelNodes_d, nextLevelNodes_d, numCurrLevelNodes, numNextLevelNodes_d); wbCheck(cudaDeviceSynchronize()); } else { wbLog(ERROR, "Invalid mode!\n"); exit(0); } // (do not modify) Copy device variables from host ---------------------- wbLog(INFO, "Copying data from device to host..."); wbCheck(cudaMemcpy(&numNextLevelNodes_h, numNextLevelNodes_d, sizeof(int), cudaMemcpyDeviceToHost)); wbCheck(cudaMemcpy(nextLevelNodes_h, nextLevelNodes_d, numNodes * sizeof(int), cudaMemcpyDeviceToHost)); wbCheck(cudaMemcpy(nodeVisited_h, nodeVisited_d, numNodes * sizeof(int), cudaMemcpyDeviceToHost)); wbCheck(cudaDeviceSynchronize()); // (do not modify) Verify correctness // ------------------------------------- // Only check that the visited nodes match the reference implementation wbSolution(args, nodeVisited_h, numNodes); // (do not modify) Free memory // ------------------------------------------------------------ free(nodePtrs_h); free(nodeVisited_h); free(nodeNeighbors_h); free(currLevelNodes_h); free(nextLevelNodes_h); wbCheck(cudaFree(nodePtrs_d)); 
wbCheck(cudaFree(nodeVisited_d)); wbCheck(cudaFree(nodeNeighbors_d)); wbCheck(cudaFree(currLevelNodes_d)); wbCheck(cudaFree(numNextLevelNodes_d)); wbCheck(cudaFree(nextLevelNodes_d)); return 0; }
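gpu_block_queuing_kernel above privatizes the next-level frontier into a shared-memory queue and flushes it with a single atomicAdd on the global counter, but the shared index returned by atomicAdd is never checked against BQ_CAPACITY. One common guard is to spill overflowing entries straight to the global queue; a sketch of that idea (hypothetical helper, not part of the file pair above; the block flush would then copy only min(count, capacity) shared entries):

// Hypothetical enqueue with spill-to-global when the block queue is full.
__device__ void enqueueWithSpill(int neighbor,
                                 int *localQ, int *localCount, int capacity,
                                 int *globalQ, int *globalCount) {
    int slot = atomicAdd(localCount, 1);
    if (slot < capacity) {
        localQ[slot] = neighbor;            // fits in the shared-memory queue
    } else {
        int g = atomicAdd(globalCount, 1);  // overflow: write directly to global memory
        globalQ[g] = neighbor;
    }
}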
3d9f62ef548e35848642590aa5dd26a5c8a1e799.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "stdio.h" #include <vector> #include <hip/hip_runtime.h> #include "math.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> #include <thrust/transform.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/zip_iterator.h> #include "../cuPrintf.cu" struct PrintStruct { __device__ int operator() (int x) { printf("x = %i\n", x); return 0; } __device__ int operator() (thrust::tuple<int, int *> x) const { int x_1 = thrust::get<0>(x); int * x_2 = thrust::get<1>(x); printf("x_1 = %i, x_2[%i] = %i\n", x_1, x_1, x_2[x_1]); return 0; } __device__ int operator() (thrust::tuple<int *, int> x) const { int position = thrust::get<1>(x); int * x_1 = thrust::get<0>(x) + 2 * position; int * x_2 = thrust::get<0>(x) + 2 * position + 1; printf("x_2a = %i \nx_2b = %i\n", x_1[0], x_2[0]); return 0; } }; int main (int argc, char** argv) { // part 1 int result = thrust::transform_reduce(thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(10), PrintStruct(), 0, thrust::plus<int>()); if (result != 0) std::cout << result << std::endl; // part 2 int numberOfData = 10; srand(23); thrust::host_vector<int> h_vec(numberOfData); int h_array[numberOfData]; for (int i = 0; i < numberOfData; i++) { h_vec[i] = rand() % 100; h_array[i] = rand() % 100; } int * d_array; hipMalloc((void**) &d_array, numberOfData*sizeof(int*)); hipMemcpy(d_array, &h_array, numberOfData*sizeof(int*), hipMemcpyHostToDevice); thrust::constant_iterator<int *> constIt(d_array); result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_counting_iterator<int>(0), constIt ) ), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_counting_iterator<int>(numberOfData), constIt ) ), PrintStruct(), 0, thrust::plus<int>() ); if (result != 0) std::cout << result << std::endl; // part 3 result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( constIt, thrust::make_counting_iterator<int>(0) ) ), thrust::make_zip_iterator( thrust::make_tuple( constIt + numberOfData/2, thrust::make_counting_iterator<int>(numberOfData/2) ) ), PrintStruct(), 0, thrust::plus<int>() ); if (result != 0) std::cout << result << std::endl; return 1; }
3d9f62ef548e35848642590aa5dd26a5c8a1e799.cu
#include <iostream> #include "stdio.h" #include <vector> #include <cuda.h> #include "math.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> #include <thrust/transform.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/zip_iterator.h> #include "../cuPrintf.cu" struct PrintStruct { __device__ int operator() (int x) { printf("x = %i\n", x); return 0; } __device__ int operator() (thrust::tuple<int, int *> x) const { int x_1 = thrust::get<0>(x); int * x_2 = thrust::get<1>(x); printf("x_1 = %i, x_2[%i] = %i\n", x_1, x_1, x_2[x_1]); return 0; } __device__ int operator() (thrust::tuple<int *, int> x) const { int position = thrust::get<1>(x); int * x_1 = thrust::get<0>(x) + 2 * position; int * x_2 = thrust::get<0>(x) + 2 * position + 1; printf("x_2a = %i \nx_2b = %i\n", x_1[0], x_2[0]); return 0; } }; int main (int argc, char** argv) { // part 1 int result = thrust::transform_reduce(thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(10), PrintStruct(), 0, thrust::plus<int>()); if (result != 0) std::cout << result << std::endl; // part 2 int numberOfData = 10; srand(23); thrust::host_vector<int> h_vec(numberOfData); int h_array[numberOfData]; for (int i = 0; i < numberOfData; i++) { h_vec[i] = rand() % 100; h_array[i] = rand() % 100; } int * d_array; cudaMalloc((void**) &d_array, numberOfData*sizeof(int*)); cudaMemcpy(d_array, &h_array, numberOfData*sizeof(int*), cudaMemcpyHostToDevice); thrust::constant_iterator<int *> constIt(d_array); result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( thrust::make_counting_iterator<int>(0), constIt ) ), thrust::make_zip_iterator( thrust::make_tuple( thrust::make_counting_iterator<int>(numberOfData), constIt ) ), PrintStruct(), 0, thrust::plus<int>() ); if (result != 0) std::cout << result << std::endl; // part 3 result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( constIt, thrust::make_counting_iterator<int>(0) ) ), thrust::make_zip_iterator( thrust::make_tuple( constIt + numberOfData/2, thrust::make_counting_iterator<int>(numberOfData/2) ) ), PrintStruct(), 0, thrust::plus<int>() ); if (result != 0) std::cout << result << std::endl; return 1; }
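The PrintStruct functors above use thrust::transform_reduce purely for its printing side effects and always return 0. For reference, the same call pattern with a value-producing functor; a minimal self-contained sketch (hypothetical, not part of the file pair above):

// Hypothetical: sum of squares of 0..9 computed with transform_reduce.
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <cstdio>

struct Square {
    __host__ __device__ int operator()(int x) const { return x * x; }
};

int main() {
    int sum = thrust::transform_reduce(thrust::make_counting_iterator(0),
                                       thrust::make_counting_iterator(10),
                                       Square(), 0, thrust::plus<int>());
    std::printf("sum of squares 0..9 = %d\n", sum);  // 285
    return 0;
}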
b2111e41d56587a74608c172a01a74a3ca4f1464.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************* * CUDAK2NN.cu * CUDAK2NN * * Author: Kareem Omar * [email protected] * https://github.com/komrad36 * * Last updated Oct 12, 2016 *******************************************************************/ // // Fastest GPU implementation of a brute-force // matcher for 512-bit binary descriptors // in 2NN mode, i.e., a match is returned if the best // match between a query vector and a training vector // is more than a certain threshold number of bits // better than the second-best match. // // Yes, that means the DIFFERENCE in popcounts is used // for thresholding, NOT the ratio. This is the CORRECT // approach for binary descriptors. // // This laboriously crafted kernel is EXTREMELY fast. // 63 BILLION comparisons per second on a stock GTX1080, // enough to match nearly 46,000 descriptors per frame at 30 fps (!) // // A key insight responsible for much of the performance of // this insanely fast CUDA kernel is due to // Christopher Parker (https://github.com/csp256), to whom // I am extremely grateful. // // CUDA CC 3.0 or higher is required. // // All functionality is contained in the files CUDAK2NN.h // and CUDAK2NN.cu. 'main.cpp' is simply a sample test harness // with example usage and performance testing. // #include "coloc/CUDAK2NN.h" #include <stdio.h> __global__ void #ifndef __INTELLISENSE__ __launch_bounds__(256, 0) #endif CUDAK2NN_kernel(const hipTextureObject_t tex_q, const int num_q, const uint64_t* __restrict__ g_training, const int num_t, int* const __restrict__ g_match, const uint8_t threshold) { uint64_t train = *(g_training += threadIdx.x & 7); g_training += 8; uint64_t q[8]; for (int i = 0, offset = ((threadIdx.x & 24) << 3) + (threadIdx.x & 7) + (blockIdx.x << 11) + (threadIdx.y << 8); i < 8; ++i, offset += 8) { const uint2 buf = tex1Dfetch<uint2>(tex_q, offset); asm("mov.b64 %0, {%1,%2};" : "=l"(q[i]) : "r"(buf.x), "r"(buf.y)); // some assembly required } int best_i, best_v = 100000, second_v = 200000; #pragma unroll 6 for (int t = 0; t < num_t; ++t, g_training += 8) { uint32_t dist[4]; for (int i = 0; i < 4; ++i) dist[i] = __byte_perm(__popcll(q[i] ^ train), __popcll(q[i + 4] ^ train), 0x5410); for (int i = 0; i < 4; ++i) dist[i] += __shfl_xor(dist[i], 1); train = *g_training; if (threadIdx.x & 1) dist[0] = dist[1]; if (threadIdx.x & 1) dist[2] = dist[3]; dist[0] += __shfl_xor(dist[0], 2); dist[2] += __shfl_xor(dist[2], 2); if (threadIdx.x & 2) dist[0] = dist[2]; dist[0] = __byte_perm(dist[0] + __shfl_xor(dist[0], 4), 0, threadIdx.x & 4 ? 0x5432 : 0x5410); second_v = min(dist[0], second_v); if (dist[0] < best_v) { second_v = best_v; best_i = t; best_v = dist[0]; } } const int idx = (blockIdx.x << 8) + (threadIdx.y << 5) + threadIdx.x; if (idx < num_q) g_match[idx] = second_v - best_v > threshold ? best_i : -1; } void CUDAK2NN(const void* const __restrict d_t, const int num_t, const hipTextureObject_t tex_q, const int num_q, int* const __restrict d_m, const int threshold) { hipLaunchKernelGGL(( CUDAK2NN_kernel), dim3(((num_q - 1) >> 8) + 1), dim3(32, 8), 0, 0, tex_q, num_q, reinterpret_cast<const uint64_t*>(d_t), num_t, d_m, threshold); hipDeviceSynchronize(); }
b2111e41d56587a74608c172a01a74a3ca4f1464.cu
/******************************************************************* * CUDAK2NN.cu * CUDAK2NN * * Author: Kareem Omar * [email protected] * https://github.com/komrad36 * * Last updated Oct 12, 2016 *******************************************************************/ // // Fastest GPU implementation of a brute-force // matcher for 512-bit binary descriptors // in 2NN mode, i.e., a match is returned if the best // match between a query vector and a training vector // is more than a certain threshold number of bits // better than the second-best match. // // Yes, that means the DIFFERENCE in popcounts is used // for thresholding, NOT the ratio. This is the CORRECT // approach for binary descriptors. // // This laboriously crafted kernel is EXTREMELY fast. // 63 BILLION comparisons per second on a stock GTX1080, // enough to match nearly 46,000 descriptors per frame at 30 fps (!) // // A key insight responsible for much of the performance of // this insanely fast CUDA kernel is due to // Christopher Parker (https://github.com/csp256), to whom // I am extremely grateful. // // CUDA CC 3.0 or higher is required. // // All functionality is contained in the files CUDAK2NN.h // and CUDAK2NN.cu. 'main.cpp' is simply a sample test harness // with example usage and performance testing. // #include "coloc/CUDAK2NN.h" #include <stdio.h> __global__ void #ifndef __INTELLISENSE__ __launch_bounds__(256, 0) #endif CUDAK2NN_kernel(const cudaTextureObject_t tex_q, const int num_q, const uint64_t* __restrict__ g_training, const int num_t, int* const __restrict__ g_match, const uint8_t threshold) { uint64_t train = *(g_training += threadIdx.x & 7); g_training += 8; uint64_t q[8]; for (int i = 0, offset = ((threadIdx.x & 24) << 3) + (threadIdx.x & 7) + (blockIdx.x << 11) + (threadIdx.y << 8); i < 8; ++i, offset += 8) { const uint2 buf = tex1Dfetch<uint2>(tex_q, offset); asm("mov.b64 %0, {%1,%2};" : "=l"(q[i]) : "r"(buf.x), "r"(buf.y)); // some assembly required } int best_i, best_v = 100000, second_v = 200000; #pragma unroll 6 for (int t = 0; t < num_t; ++t, g_training += 8) { uint32_t dist[4]; for (int i = 0; i < 4; ++i) dist[i] = __byte_perm(__popcll(q[i] ^ train), __popcll(q[i + 4] ^ train), 0x5410); for (int i = 0; i < 4; ++i) dist[i] += __shfl_xor(dist[i], 1); train = *g_training; if (threadIdx.x & 1) dist[0] = dist[1]; if (threadIdx.x & 1) dist[2] = dist[3]; dist[0] += __shfl_xor(dist[0], 2); dist[2] += __shfl_xor(dist[2], 2); if (threadIdx.x & 2) dist[0] = dist[2]; dist[0] = __byte_perm(dist[0] + __shfl_xor(dist[0], 4), 0, threadIdx.x & 4 ? 0x5432 : 0x5410); second_v = min(dist[0], second_v); if (dist[0] < best_v) { second_v = best_v; best_i = t; best_v = dist[0]; } } const int idx = (blockIdx.x << 8) + (threadIdx.y << 5) + threadIdx.x; if (idx < num_q) g_match[idx] = second_v - best_v > threshold ? best_i : -1; } void CUDAK2NN(const void* const __restrict d_t, const int num_t, const cudaTextureObject_t tex_q, const int num_q, int* const __restrict d_m, const int threshold) { CUDAK2NN_kernel<<<((num_q - 1) >> 8) + 1, { 32, 8 }>>>(tex_q, num_q, reinterpret_cast<const uint64_t*>(d_t), num_t, d_m, threshold); cudaDeviceSynchronize(); }
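The header comment above defines the 2NN acceptance test: report the best training index only when its Hamming distance beats the runner-up by more than `threshold` bits over the 512-bit (8 x uint64_t) descriptors. A plain host-side reference of that test, convenient for validating the kernel's output (hypothetical function, not part of the file pair above; assumes the GCC/Clang __builtin_popcountll intrinsic):

// Hypothetical CPU reference for one query descriptor against num_t training descriptors.
#include <cstdint>

int match2NN(const uint64_t *query, const uint64_t *train, int num_t, int threshold) {
    int best_i = -1, best = 1 << 30, second = 1 << 30;
    for (int t = 0; t < num_t; ++t) {
        int d = 0;
        for (int w = 0; w < 8; ++w)
            d += __builtin_popcountll(query[w] ^ train[8 * t + w]);   // Hamming distance
        if (d < best)        { second = best; best = d; best_i = t; }
        else if (d < second) { second = d; }
    }
    return (second - best > threshold) ? best_i : -1;   // same decision as the kernel
}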
ff1cbf7f30dfad5341bff2ba4677336a302e5964.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> // std::ifstream #include <sstream> // std::stringstream #include <string> // std::string, std::stoi #include <cstring> // std::strcmp #include <cmath> #include <vector> #include <chrono> #include <ctime> #include "Graph.hpp" #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/count.h> #define GPU 1 using namespace CSC586C::gpu_graph; extern const double damping_factor = 0.85; extern const unsigned max_iterations = 100; extern const double tolerance = 1e-10; const int blocksize = 512; // Read Input (pairs of source and destination links) from file with format: // src_index dest_index // ... // src_index dest_index ColdEdge ReadInputFromTextFile(const char* input_file, unsigned& num_vertices) { std::ifstream myfile (input_file); ColdEdge edges; unsigned source, destination; if (myfile.is_open()) { while(myfile >> source >> destination) { unsigned larger = (source > destination)? source : destination; num_vertices = (num_vertices > larger)? num_vertices : larger; edges.src.push_back(source); edges.dest.push_back(destination); } ++num_vertices; myfile.close(); } return edges; } bool ToleranceCheck(const unsigned& num_v, HotData& hotData) { // Sum up the pagerank double pr_sum = 0.0; for (unsigned i = 0; i < num_v; i++) { pr_sum += hotData.pagerank[i]; } // Calculate the cur_toleranceor pr_sum = 1.0 / pr_sum; double cur_tolerance = 0.0; for (unsigned i = 0; i < num_v; i++) { hotData.pagerank[i] *= pr_sum; // norm 1 cur_tolerance += ::fabs(hotData.pagerank[i] - hotData.pre_pagerank[i]); } if (cur_tolerance < tolerance) { std::cout << "Current toleranceor: " << cur_tolerance << std::endl; return true; } return false; } #ifdef GPU __global__ void update_pagerank( int *ingoing_edges_num, int *outgoing_edges_num, int *begin_index, int *adj_edges, double *pre_pagerank, double pr_dangling, double* pr_random, double *pagerank, double* dangling_vec, size_t n ) { int const index = threadIdx.x + blockIdx.x * blockDim.x; if( index < n ) { int num_edges = ingoing_edges_num[index]; int begin_index_ = begin_index[index]; pagerank[index] = 0.0; for( int i = 0; i < num_edges; ++i ){ int inward_edge_index = adj_edges[begin_index_ + i]; double pr_eigenvector = 0.85 * pre_pagerank[inward_edge_index] / outgoing_edges_num[inward_edge_index]; pagerank[index] += pr_eigenvector; } pagerank[index] += (*pr_random + pr_dangling); dangling_vec[index] = pagerank[index] * (outgoing_edges_num[index] == 0); } } #endif void PageRank(GPU_Graph *graph) { const unsigned num_v = graph->VertexesNum(); double init_rank = double(1.0 / num_v); double pr_random = (1.0 - damping_factor) / num_v; unsigned iter = 0; #ifdef GPU // calculate number of blocks. 
block_size is fixed to 512 auto const num_blocks = ::ceil( num_v / static_cast< float >( blocksize) ); //Initialize all memories used by GPU int *dev_ingoing_edge_nums; int *dev_outgoing_edge_nums; int *dev_begin_index; int *dev_adj_edges; double *dev_pre_pagerank; double *dev_pagerank; double *dev_pr_random; double *dev_dangling_vec; //Allocate memory for elments mapped to GPU hipMalloc( (void **) &dev_ingoing_edge_nums, num_v*sizeof(int) ); hipMalloc( (void **) &dev_outgoing_edge_nums, num_v*sizeof(int) ); hipMalloc( (void **) &dev_begin_index, num_v*sizeof(int) ); hipMalloc( (void **) &dev_adj_edges, graph->num_edges*sizeof(int) ); hipMalloc( (void **) &dev_pagerank, num_v*sizeof(double) ); hipMalloc( (void **) &dev_pre_pagerank, num_v*sizeof(double) ); hipMalloc( (void **) &dev_pr_random, sizeof(double) ); hipMalloc( (void **) &dev_dangling_vec, num_v*sizeof(double) ); //Define trust objects and wrap raw pointers with device pointers thrust::device_ptr<double> dev_thrust_dangling_vec(dev_dangling_vec); //Initialize obejcts that won't be changed in the algorithm hipMemcpy( dev_ingoing_edge_nums, graph->ingoing_edges_num.data(), num_v*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( dev_outgoing_edge_nums, graph->hotData.outgoing_edges_num.data(), num_v*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( dev_begin_index, graph->beginIndex.data(), num_v*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( dev_adj_edges, graph->adjE, graph->num_edges*sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( dev_pr_random, &pr_random, sizeof(double), hipMemcpyHostToDevice ); //Initialize objects that will be updated in the algorithm, and copy //them from host to device graph->hotData.pagerank.assign(num_v, init_rank); graph->hotData.pre_pagerank.assign(num_v, 0); hipMemcpy( dev_pre_pagerank, graph->hotData.pre_pagerank.data(), num_v*sizeof(double), hipMemcpyHostToDevice ); hipMemcpy( dev_pagerank, graph->hotData.pagerank.data(), num_v*sizeof(double), hipMemcpyHostToDevice ); double dangling_pr_sum = 0.0; for (unsigned i = 0; i < num_v; i++) { dangling_pr_sum += graph->hotData.pagerank[i] * (graph->hotData.outgoing_edges_num[i] == 0); } while(iter++ < max_iterations){ double pr_dangling = damping_factor * dangling_pr_sum / num_v; hipMemcpy( dev_pre_pagerank, dev_pagerank, num_v*sizeof(double), hipMemcpyDeviceToDevice ); //Main function in this algorithm to update pagerank at each iteration, hand over to GPU hipLaunchKernelGGL(( update_pagerank), dim3(num_blocks), dim3(blocksize) , 0, 0, dev_ingoing_edge_nums, dev_outgoing_edge_nums, dev_begin_index, dev_adj_edges, dev_pre_pagerank, pr_dangling, dev_pr_random, dev_pagerank, dev_dangling_vec, num_v); dangling_pr_sum = thrust::reduce( dev_thrust_dangling_vec, dev_thrust_dangling_vec+num_v ); } hipMemcpy( graph->hotData.pagerank.data(), dev_pagerank, num_v*sizeof(double), hipMemcpyDeviceToHost ); hipFree( dev_ingoing_edge_nums ); hipFree( dev_outgoing_edge_nums ); hipFree( dev_begin_index ); hipFree( dev_adj_edges ); hipFree( dev_pagerank ); hipFree( dev_pre_pagerank ); hipFree( dev_pr_random ); hipFree( dev_dangling_vec ); #endif #if 0 // This is the original algorithm in CPU which we port to GPU for (unsigned i = 0; i < num_v; i++) { graph->hotData.pagerank[i] = init_rank; graph->hotData.pre_pagerank[i] = 0.0; } while (iter++ < max_iterations) { double dangling_pr_sum = 0.0; // Update the pagerank values in every iteration for (unsigned i = 0; i < num_v; i++) { graph->hotData.pre_pagerank[i] = graph->hotData.pagerank[i]; graph->hotData.pagerank[i] = 0.0; 
dangling_pr_sum += graph->hotData.pre_pagerank[i] * (graph->hotData.outgoing_edges_num[i] == 0); } double pr_dangling = damping_factor * dangling_pr_sum / num_v; // Iterater all the vertexes and calculate its adjacency function l(pi,pj) of all inward links // Update its pagerank value by adding pr_eigenvector from its inward links separately for( int i = 0; i < num_v; ++i ) { unsigned inward_edges_num = graph->ingoing_edges_num[i]; int begin_index = graph->beginIndex[i]; for( int j = 0; j < inward_edges_num; ++j){ unsigned inward_edge_index = graph->adjE[begin_index + j]; double pr_eigenvector = damping_factor * graph->hotData.pre_pagerank[inward_edge_index] / graph->hotData.outgoing_edges_num[inward_edge_index]; graph->hotData.pagerank[i] += pr_eigenvector; } graph->hotData.pagerank[i] += (pr_random + pr_dangling); } // finish when cur_toleranceor is smaller than tolerance we set if(ToleranceCheck(num_v, graph->hotData)) { std::cout << "Iteration time: " << iter << std::endl; break; } } #endif } void printFinalResults(GPU_Graph* graph) { std::cout << "PageRank values: \n"; for(int i = 0; i < graph->VertexesNum(); ++i) { std::cout << "The index is: " << i << " with value " << graph->hotData.pagerank[i] << '\n'; } std::cout<<'\n'; } void PrintBenchmark(std::chrono::time_point<std::chrono::steady_clock> start_t, std::chrono::time_point<std::chrono::steady_clock> const end_t, const unsigned loop_t) { auto const avg_time = std::chrono::duration_cast<std::chrono::microseconds>( end_t - start_t ).count() / double(loop_t); std::cout << "Average total running time = " << avg_time << " us" << std::endl; } int main(int argc, char *argv[]) { unsigned loop_times = 10; unsigned num_vertices = 0; if(argc >= 4) { const char* test_mode = argv[2]; ColdEdge input = ReadInputFromTextFile(argv[1], num_vertices); if(std::strcmp(test_mode, "total") == 0) { auto const start_time = std::chrono::steady_clock::now(); for (int i = 0; i < loop_times; i++) { GPU_Graph graph(num_vertices, input); PageRank(&graph); //printFinalResults(&graph); } auto const end_time = std::chrono::steady_clock::now(); PrintBenchmark(start_time, end_time, loop_times); } else if(std::strcmp(test_mode, "graph") == 0 ) { auto const start_time = std::chrono::steady_clock::now(); GPU_Graph graph(num_vertices, input); auto const end_time = std::chrono::steady_clock::now(); PageRank(&graph); PrintBenchmark(start_time, end_time, 1); } else if(std::strcmp(test_mode, "pagerank") == 0) { GPU_Graph graph(num_vertices, input); auto const start_time = std::chrono::steady_clock::now(); for (unsigned i = 0; i < loop_times; i++) { PageRank(&graph); } auto const end_time = std::chrono::steady_clock::now(); PrintBenchmark(start_time, end_time, loop_times); } else { std::cout << "Invalid Input!" 
<< std::endl; std::cout << "Please input the input text file name wanted in argv[1]" << std::endl; std::cout << "Please input the time mode (total/graph/pagerank) to be recorded in argv[2]" << std::endl; std::cout << "Please input the number of threads wanted to use in argv[3]" << std::endl; } } else if (argc >= 2 && argc < 4) { ColdEdge input = ReadInputFromTextFile(argv[1], num_vertices); GPU_Graph graph(num_vertices, input); auto const start_time = std::chrono::steady_clock::now(); for (int i = 0; i < loop_times; i++) { PageRank(&graph); //printFinalResults(&graph); } auto const end_time = std::chrono::steady_clock::now(); PrintBenchmark(start_time, end_time, loop_times); } else { std::cout << "Invalid Input: " << std::endl; std::cout << "Please input the input text file name wanted in argv[1]" << std::endl; std::cout << "Please input the time mode (total/graph/pagerank) to be recorded in argv[2]" << std::endl; std::cout << "Please input the number of threads wanted to use in argv[3]" << std::endl; } return 0; }
ff1cbf7f30dfad5341bff2ba4677336a302e5964.cu
#include <iostream> #include <fstream> // std::ifstream #include <sstream> // std::stringstream #include <string> // std::string, std::stoi #include <cstring> // std::strcmp #include <cmath> #include <vector> #include <chrono> #include <ctime> #include "Graph.hpp" #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/count.h> #define GPU 1 using namespace CSC586C::gpu_graph; extern const double damping_factor = 0.85; extern const unsigned max_iterations = 100; extern const double tolerance = 1e-10; const int blocksize = 512; // Read Input (pairs of source and destination links) from file with format: // src_index dest_index // ... // src_index dest_index ColdEdge ReadInputFromTextFile(const char* input_file, unsigned& num_vertices) { std::ifstream myfile (input_file); ColdEdge edges; unsigned source, destination; if (myfile.is_open()) { while(myfile >> source >> destination) { unsigned larger = (source > destination)? source : destination; num_vertices = (num_vertices > larger)? num_vertices : larger; edges.src.push_back(source); edges.dest.push_back(destination); } ++num_vertices; myfile.close(); } return edges; } bool ToleranceCheck(const unsigned& num_v, HotData& hotData) { // Sum up the pagerank double pr_sum = 0.0; for (unsigned i = 0; i < num_v; i++) { pr_sum += hotData.pagerank[i]; } // Calculate the cur_toleranceor pr_sum = 1.0 / pr_sum; double cur_tolerance = 0.0; for (unsigned i = 0; i < num_v; i++) { hotData.pagerank[i] *= pr_sum; // norm 1 cur_tolerance += std::fabs(hotData.pagerank[i] - hotData.pre_pagerank[i]); } if (cur_tolerance < tolerance) { std::cout << "Current toleranceor: " << cur_tolerance << std::endl; return true; } return false; } #ifdef GPU __global__ void update_pagerank( int *ingoing_edges_num, int *outgoing_edges_num, int *begin_index, int *adj_edges, double *pre_pagerank, double pr_dangling, double* pr_random, double *pagerank, double* dangling_vec, size_t n ) { int const index = threadIdx.x + blockIdx.x * blockDim.x; if( index < n ) { int num_edges = ingoing_edges_num[index]; int begin_index_ = begin_index[index]; pagerank[index] = 0.0; for( int i = 0; i < num_edges; ++i ){ int inward_edge_index = adj_edges[begin_index_ + i]; double pr_eigenvector = 0.85 * pre_pagerank[inward_edge_index] / outgoing_edges_num[inward_edge_index]; pagerank[index] += pr_eigenvector; } pagerank[index] += (*pr_random + pr_dangling); dangling_vec[index] = pagerank[index] * (outgoing_edges_num[index] == 0); } } #endif void PageRank(GPU_Graph *graph) { const unsigned num_v = graph->VertexesNum(); double init_rank = double(1.0 / num_v); double pr_random = (1.0 - damping_factor) / num_v; unsigned iter = 0; #ifdef GPU // calculate number of blocks. 
block_size is fixed to 512 auto const num_blocks = std::ceil( num_v / static_cast< float >( blocksize) ); //Initialize all memories used by GPU int *dev_ingoing_edge_nums; int *dev_outgoing_edge_nums; int *dev_begin_index; int *dev_adj_edges; double *dev_pre_pagerank; double *dev_pagerank; double *dev_pr_random; double *dev_dangling_vec; //Allocate memory for elments mapped to GPU cudaMalloc( (void **) &dev_ingoing_edge_nums, num_v*sizeof(int) ); cudaMalloc( (void **) &dev_outgoing_edge_nums, num_v*sizeof(int) ); cudaMalloc( (void **) &dev_begin_index, num_v*sizeof(int) ); cudaMalloc( (void **) &dev_adj_edges, graph->num_edges*sizeof(int) ); cudaMalloc( (void **) &dev_pagerank, num_v*sizeof(double) ); cudaMalloc( (void **) &dev_pre_pagerank, num_v*sizeof(double) ); cudaMalloc( (void **) &dev_pr_random, sizeof(double) ); cudaMalloc( (void **) &dev_dangling_vec, num_v*sizeof(double) ); //Define trust objects and wrap raw pointers with device pointers thrust::device_ptr<double> dev_thrust_dangling_vec(dev_dangling_vec); //Initialize obejcts that won't be changed in the algorithm cudaMemcpy( dev_ingoing_edge_nums, graph->ingoing_edges_num.data(), num_v*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( dev_outgoing_edge_nums, graph->hotData.outgoing_edges_num.data(), num_v*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( dev_begin_index, graph->beginIndex.data(), num_v*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( dev_adj_edges, graph->adjE, graph->num_edges*sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( dev_pr_random, &pr_random, sizeof(double), cudaMemcpyHostToDevice ); //Initialize objects that will be updated in the algorithm, and copy //them from host to device graph->hotData.pagerank.assign(num_v, init_rank); graph->hotData.pre_pagerank.assign(num_v, 0); cudaMemcpy( dev_pre_pagerank, graph->hotData.pre_pagerank.data(), num_v*sizeof(double), cudaMemcpyHostToDevice ); cudaMemcpy( dev_pagerank, graph->hotData.pagerank.data(), num_v*sizeof(double), cudaMemcpyHostToDevice ); double dangling_pr_sum = 0.0; for (unsigned i = 0; i < num_v; i++) { dangling_pr_sum += graph->hotData.pagerank[i] * (graph->hotData.outgoing_edges_num[i] == 0); } while(iter++ < max_iterations){ double pr_dangling = damping_factor * dangling_pr_sum / num_v; cudaMemcpy( dev_pre_pagerank, dev_pagerank, num_v*sizeof(double), cudaMemcpyDeviceToDevice ); //Main function in this algorithm to update pagerank at each iteration, hand over to GPU update_pagerank<<< num_blocks, blocksize >>>(dev_ingoing_edge_nums, dev_outgoing_edge_nums, dev_begin_index, dev_adj_edges, dev_pre_pagerank, pr_dangling, dev_pr_random, dev_pagerank, dev_dangling_vec, num_v); dangling_pr_sum = thrust::reduce( dev_thrust_dangling_vec, dev_thrust_dangling_vec+num_v ); } cudaMemcpy( graph->hotData.pagerank.data(), dev_pagerank, num_v*sizeof(double), cudaMemcpyDeviceToHost ); cudaFree( dev_ingoing_edge_nums ); cudaFree( dev_outgoing_edge_nums ); cudaFree( dev_begin_index ); cudaFree( dev_adj_edges ); cudaFree( dev_pagerank ); cudaFree( dev_pre_pagerank ); cudaFree( dev_pr_random ); cudaFree( dev_dangling_vec ); #endif #if 0 // This is the original algorithm in CPU which we port to GPU for (unsigned i = 0; i < num_v; i++) { graph->hotData.pagerank[i] = init_rank; graph->hotData.pre_pagerank[i] = 0.0; } while (iter++ < max_iterations) { double dangling_pr_sum = 0.0; // Update the pagerank values in every iteration for (unsigned i = 0; i < num_v; i++) { graph->hotData.pre_pagerank[i] = graph->hotData.pagerank[i]; graph->hotData.pagerank[i] = 0.0; 
dangling_pr_sum += graph->hotData.pre_pagerank[i] * (graph->hotData.outgoing_edges_num[i] == 0); } double pr_dangling = damping_factor * dangling_pr_sum / num_v; // Iterater all the vertexes and calculate its adjacency function l(pi,pj) of all inward links // Update its pagerank value by adding pr_eigenvector from its inward links separately for( int i = 0; i < num_v; ++i ) { unsigned inward_edges_num = graph->ingoing_edges_num[i]; int begin_index = graph->beginIndex[i]; for( int j = 0; j < inward_edges_num; ++j){ unsigned inward_edge_index = graph->adjE[begin_index + j]; double pr_eigenvector = damping_factor * graph->hotData.pre_pagerank[inward_edge_index] / graph->hotData.outgoing_edges_num[inward_edge_index]; graph->hotData.pagerank[i] += pr_eigenvector; } graph->hotData.pagerank[i] += (pr_random + pr_dangling); } // finish when cur_toleranceor is smaller than tolerance we set if(ToleranceCheck(num_v, graph->hotData)) { std::cout << "Iteration time: " << iter << std::endl; break; } } #endif } void printFinalResults(GPU_Graph* graph) { std::cout << "PageRank values: \n"; for(int i = 0; i < graph->VertexesNum(); ++i) { std::cout << "The index is: " << i << " with value " << graph->hotData.pagerank[i] << '\n'; } std::cout<<'\n'; } void PrintBenchmark(std::chrono::time_point<std::chrono::steady_clock> start_t, std::chrono::time_point<std::chrono::steady_clock> const end_t, const unsigned loop_t) { auto const avg_time = std::chrono::duration_cast<std::chrono::microseconds>( end_t - start_t ).count() / double(loop_t); std::cout << "Average total running time = " << avg_time << " us" << std::endl; } int main(int argc, char *argv[]) { unsigned loop_times = 10; unsigned num_vertices = 0; if(argc >= 4) { const char* test_mode = argv[2]; ColdEdge input = ReadInputFromTextFile(argv[1], num_vertices); if(std::strcmp(test_mode, "total") == 0) { auto const start_time = std::chrono::steady_clock::now(); for (int i = 0; i < loop_times; i++) { GPU_Graph graph(num_vertices, input); PageRank(&graph); //printFinalResults(&graph); } auto const end_time = std::chrono::steady_clock::now(); PrintBenchmark(start_time, end_time, loop_times); } else if(std::strcmp(test_mode, "graph") == 0 ) { auto const start_time = std::chrono::steady_clock::now(); GPU_Graph graph(num_vertices, input); auto const end_time = std::chrono::steady_clock::now(); PageRank(&graph); PrintBenchmark(start_time, end_time, 1); } else if(std::strcmp(test_mode, "pagerank") == 0) { GPU_Graph graph(num_vertices, input); auto const start_time = std::chrono::steady_clock::now(); for (unsigned i = 0; i < loop_times; i++) { PageRank(&graph); } auto const end_time = std::chrono::steady_clock::now(); PrintBenchmark(start_time, end_time, loop_times); } else { std::cout << "Invalid Input!" 
<< std::endl; std::cout << "Please input the input text file name wanted in argv[1]" << std::endl; std::cout << "Please input the time mode (total/graph/pagerank) to be recorded in argv[2]" << std::endl; std::cout << "Please input the number of threads wanted to use in argv[3]" << std::endl; } } else if (argc >= 2 && argc < 4) { ColdEdge input = ReadInputFromTextFile(argv[1], num_vertices); GPU_Graph graph(num_vertices, input); auto const start_time = std::chrono::steady_clock::now(); for (int i = 0; i < loop_times; i++) { PageRank(&graph); //printFinalResults(&graph); } auto const end_time = std::chrono::steady_clock::now(); PrintBenchmark(start_time, end_time, loop_times); } else { std::cout << "Invalid Input: " << std::endl; std::cout << "Please input the input text file name wanted in argv[1]" << std::endl; std::cout << "Please input the time mode (total/graph/pagerank) to be recorded in argv[2]" << std::endl; std::cout << "Please input the number of threads wanted to use in argv[3]" << std::endl; } return 0; }
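Side note (not part of the dataset): for readers comparing the two columns of the row above, the minimal sketch below condenses the runtime-API translation that hipify applied to that file — cudaMalloc→hipMalloc, cudaMemcpy→hipMemcpy, cudaFree→hipFree, and the <<<...>>> kernel launch rewritten as hipLaunchKernelGGL. The kernel, buffer, and size names here are invented for illustration; only the API mappings themselves are taken from the pair above.

// Minimal CUDA program; the hipify rewrite of each runtime call, as seen in the row above, is noted in a trailing comment.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(double* data, double factor, size_t n) {
    size_t i = threadIdx.x + blockIdx.x * (size_t)blockDim.x;
    if (i < n) data[i] *= factor;   // each thread scales one element
}

int main() {
    const size_t n = 1024;
    double host[n];
    for (size_t i = 0; i < n; ++i) host[i] = 1.0;

    double* dev = nullptr;
    cudaMalloc((void**)&dev, n * sizeof(double));                       // -> hipMalloc((void**)&dev, ...)
    cudaMemcpy(dev, host, n * sizeof(double), cudaMemcpyHostToDevice);  // -> hipMemcpy(..., hipMemcpyHostToDevice)

    scale<<<(n + 511) / 512, 512>>>(dev, 0.85, n);                      // -> hipLaunchKernelGGL(scale, dim3((n+511)/512), dim3(512), 0, 0, dev, 0.85, n)

    cudaMemcpy(host, dev, n * sizeof(double), cudaMemcpyDeviceToHost);  // -> hipMemcpy(..., hipMemcpyDeviceToHost); the synchronous copy also waits for the kernel
    cudaFree(dev);                                                      // -> hipFree(dev)

    printf("host[0] = %f\n", host[0]);                                  // expect 0.85
    return 0;
}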
3da75fcacafb9c09db53cbb7a94271e37cb679ea.hip
// !!! This is a file automatically generated by hipify!!! // GraphicsTemplate.cpp // ////////////////////////////////////////////////////////////////////////////////////////// // includes ////////////////////////////////////////////////////////////////////////////////////////// #include <iostream> using std::cout; using std::cerr; #include <tchar.h> #include <windows.h> #include "globals_hip.cuh" #include <vector> using std::vector; #include "NA_MathsLib.cuh" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> //http://www.cplusplus.com/reference/chrono/high_resolution_clock/now/ #include <ctime> #include <ratio> #include <chrono> using namespace std::chrono; //http://www.cplusplus.com/reference/string/stoi/ #include <string> ///////////////////////////////////////////////////////////////////////// // CUDA Main ///////////////////////////////////////////////////////////////////////// //general notes #define CUDA_CALL(x) { const hipError_t a = (x); if (a != hipSuccess) {printf("\nCUDA Error: %s (err_num=%d) \n",cudaGetErrorScring(a),a); hipDeviceReset(); assert(0);}} // Shane Cook - CUDA Programming A Developer's Guide to Parallel Computing with GPUs ISBN:978-0-12-415933-4 P67 //hipDeviceSynchronize(); //have the CPU wait for the GPU //cudaKernal<<<num_blocks, num_threads>>>(args ...); // running the kernal on the GPU struct psudoVector2 { float x; float y; }; struct psudoBoid { psudoVector2 position; psudoVector2 currentVelocity; }; __global__ void cudaBoidUpdate(psudoBoid* globalBoidArray, int loopCount, const int BOID_MAX) { bool debug = false; bool allThreadsDebug = false; int selfIndex = (int)threadIdx.x; // slightly more readable and means less casting if ((selfIndex == 0 && debug) || allThreadsDebug) printf("kernel launched\n"); // create shared array for communicating between threads/boids __shared__ psudoBoid* sharedBoidArray; if(selfIndex == 0) sharedBoidArray = (psudoBoid*)malloc(BOID_MAX * sizeof(psudoBoid)); __syncthreads(); sharedBoidArray[selfIndex] = globalBoidArray[selfIndex]; // every boid copies it own data into the shared memory if ((selfIndex ==0 && debug) || allThreadsDebug) printf("init complete\n"); psudoBoid* localBoidArray = (psudoBoid*)malloc(BOID_MAX * sizeof(psudoBoid)); // each thread has its own cache of the shared array, this is likely the cause of 551 boid limit int* nearbyBoidIndexer = (int*)malloc(BOID_MAX * sizeof(int)); //save memory while creating short list with a trick for (int loop = 0; loop < loopCount; loop++) { if ((selfIndex ==0 && debug) || allThreadsDebug) printf("beginning loop %d\n", loop); // rebuild localBoidArray cache // starting at own boid, copy data into own memory int i = selfIndex; for (int j = 0; j < BOID_MAX; j++) { // actual copy localBoidArray[i] = sharedBoidArray[i]; i++; // next boid if (i == BOID_MAX) // wrap arround when walking off memeory i = 0; } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("local cache rebuilt\n"); // find which boids are in range int nearbyBoidIndexSize = 0; for (int i = 0; i < BOID_MAX; i++) { // CPU version had a bug here //if (i == selfIndex) //{ //skip //} //else //{ psudoVector2 temp; temp.x = localBoidArray[i].position.x - localBoidArray[selfIndex].position.x; temp.y = localBoidArray[i].position.y - localBoidArray[selfIndex].position.y; int tempLength = sqrt(temp.x*temp.x + temp.y*temp.y); if (tempLength < BIOD_SIGHT_RANGE) // if the boid is close enough to be seen then add it 
to the nearby list { nearbyBoidIndexer[nearbyBoidIndexSize] = i; nearbyBoidIndexSize++; } //} } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("sort list made\n"); // alightment psudoVector2 sumVelocity; sumVelocity.x = 0; sumVelocity.y = 0; // nearbyBoidIndexer trick in action for (int i = 0; i < nearbyBoidIndexSize; i++) { sumVelocity.x += localBoidArray[nearbyBoidIndexer[i]].currentVelocity.x; sumVelocity.y += localBoidArray[nearbyBoidIndexer[i]].currentVelocity.y; } // convert to average sumVelocity.x = sumVelocity.x / nearbyBoidIndexSize; sumVelocity.y = sumVelocity.y / nearbyBoidIndexSize; psudoVector2 newVelocity = sumVelocity; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("alignment found\n"); // cohesion psudoVector2 sumPosition; sumPosition.x = 0; sumPosition.y = 0; for (int i = 0; i < nearbyBoidIndexSize; i++) // just realised I could combine this loop with the previous one // keeping them seperate to maintain readability { sumPosition.x += localBoidArray[nearbyBoidIndexer[i]].position.x; sumPosition.y += localBoidArray[nearbyBoidIndexer[i]].position.y; } // convert to average sumPosition.x = sumPosition.x / nearbyBoidIndexSize; sumPosition.y = sumPosition.y / nearbyBoidIndexSize; psudoVector2 temp; temp.x = localBoidArray[selfIndex].position.x - sumPosition.x; temp.y = localBoidArray[selfIndex].position.y - sumPosition.y; temp.x *= BOID_COHESION_WEIGHTING; temp.y *= BOID_COHESION_WEIGHTING; // modify velocity to head towards the average position newVelocity.x += temp.x; newVelocity.y += temp.y; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("cohesion done\n"); // seperation for (int i = 0; i < nearbyBoidIndexSize; i++) // another for loop that could be merged? { if (nearbyBoidIndexer[i] != selfIndex) // skip self { psudoVector2 temp; temp.x = localBoidArray[selfIndex].position.x - localBoidArray[i].position.x; temp.y = localBoidArray[selfIndex].position.y - localBoidArray[i].position.y; int tempLength = sqrt(temp.x*temp.x + temp.y*temp.y); if (tempLength < BOID_RESPECT_DIST) { newVelocity = temp; } } } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("seperation done\n"); // STUFF FROM CPU POST UPDATE METHOD // enforce rotation limit // commented out due to bug in NA_Vector::clockwiseAngle - it doesn't give a different value when you mesure from the other vector. 
Thiss means that the CPU version had this bug // also porting this to cuda would have made ugly code /*float newVelocityCurrentVelocityClockwiseAngle; //this is going to get messy - missing my vector library now float newVelocityLenSq = newVelocity.x*newVelocity.x + newVelocity.y*newVelocity.y; // I could possibly do some #defines for readability float currentVelocityLenSq = currentVelocity.x*currentVelocity.x + currentVelocity.y*currentVelocity.y; float dotProduct = newVelocity.x*currentVelocity.x + newVelocity.y*currentVelocity.y; newVelocityCurrentVelocityClockwiseAngle = acos(dotProduct / sqrt(newVelocityLenSq * currentVelocityLenSq)); float currentVelocityNewVelocityCClockwiseAngle; //there is a difference, the velocities are swapped float newVelocityLenSq = newVelocity.x*newVelocity.x + newVelocity.y*newVelocity.y; // I could possibly do some #defines for readability float currentVelocityLenSq = currentVelocity.x*currentVelocity.x + currentVelocity.y*currentVelocity.y; float dotProduct = newVelocity.x*currentVelocity.x + newVelocity.y*currentVelocity.y; newVelocityCurrentVelocityClockwiseAngle = acos(dotProduct / sqrt(newVelocityLenSq * currentVelocityLenSq)); if (newVelocityCurrentVelocityClockwiseAngle > BOID_ROTATE_MAX && currentVelocityNewVelocityCClockwiseAngle > BOID_ROTATE_MAX) { if (newVelocityCurrentVelocityClockwiseAngle < currentVelocityNewVelocityCClockwiseAngle)//clockwise or counterclockwise? { NA_Matrix r = NA_Matrix(NA_Matrix::types::rotateZ, BOID_ROTATE_MAX); newVelocity = r.matrixXvector(newVelocity); } else { NA_Matrix r = NA_Matrix(NA_Matrix::types::rotateZ, -BOID_ROTATE_MAX); newVelocity = r.matrixXvector(newVelocity); } }*/ // enforce speed limit if ((selfIndex ==0 && debug) || allThreadsDebug) printf("enforcing the speed limit\n"); float l = sqrt(newVelocity.x*newVelocity.x + newVelocity.y*newVelocity.y); if (l > BOID_SPEED_MAX); { // normalise and then scale newVelocity.x = (newVelocity.x / l)*BOID_SPEED_MAX; // I occasionally had NaN in boid data, I suspect that is was caused here from when the boid filtered itself out of the nearbyBoidIndexer newVelocity.y = (newVelocity.y / l)*BOID_SPEED_MAX; } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("obaying the speed limit\n"); // update position with velocity localBoidArray[selfIndex].currentVelocity = newVelocity; localBoidArray[selfIndex].position.x += newVelocity.x; localBoidArray[selfIndex].position.y += newVelocity.y; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("updated local cache\n"); // screen wrap if (localBoidArray[selfIndex].position.x < 0) localBoidArray[selfIndex].position.x += SCREEN_WIDTH; if (localBoidArray[selfIndex].position.x > SCREEN_WIDTH) localBoidArray[selfIndex].position.x -= SCREEN_WIDTH; if (localBoidArray[selfIndex].position.y < 0) localBoidArray[selfIndex].position.y += SCREEN_HEIGHT; if (localBoidArray[selfIndex].position.y > SCREEN_HEIGHT) localBoidArray[selfIndex].position.y -= SCREEN_HEIGHT; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("staying within the world\n"); if ((selfIndex ==0 && debug) || allThreadsDebug) printf("waiting for everyone\n"); __syncthreads(); // update shared data sharedBoidArray[selfIndex] = localBoidArray[selfIndex]; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("updated shared info\n"); //TODO: cuda/opengl interop render // wait for all threads (get ready for next round) if ((selfIndex ==0 && debug) || allThreadsDebug) printf("waiting for next loop\n"); __syncthreads(); } // threads cleanup their local stuff 
free(nearbyBoidIndexer); free(localBoidArray); // put stuff back in global memory so that CPU can collect it if wanted globalBoidArray[selfIndex] = sharedBoidArray[selfIndex]; __syncthreads(); if (selfIndex == 0) free(sharedBoidArray); } int _tmain(int argc, _TCHAR* argv[]) { int loopCount; if (argc != 3) { std::cerr << "usage: " << argv[0] << " <boidCount> <loopCount> \n"; cout << "errored\n"; return -1; } else { // convert to ints, not sure what happens if can't BOID_MAX = std::stoi(argv[1], NULL); //http://www.cplusplus.com/reference/string/stoi/ loopCount = std::stoi(argv[2], NULL); } const int numberOfBlocks = 1; const int numberOfThreadsPerBlock = BOID_MAX; // expect errors with over 1024 boids // set up cuda hipError_t err = hipSetDevice(0); hipDeviceReset(); if (err != hipSuccess) { cerr << "GraphicsTemplate::_tmain - failed to set device\n"; cout << "errored\n"; hipDeviceReset(); return -1; } // make all boids extern NA_MathsLib na_maths; na_maths.seedDice(0); // fixed seed matches CPU version psudoBoid* boidArray = (psudoBoid*) malloc(BOID_MAX * sizeof(psudoBoid)); //psudoBoid boidArray[BOID_MAX]; //set initual values for (int i = 0; i < BOID_MAX; i++) { boidArray[i].position.x = na_maths.dice(SCREEN_WIDTH); boidArray[i].position.y = na_maths.dice(SCREEN_HEIGHT); boidArray[i].currentVelocity.x = float(na_maths.dice(-100, 100)) / 100.0f; boidArray[i].currentVelocity.y = float(na_maths.dice(-100, 100)) / 100.0f; } // tell cuda to allocate space for boids and copy boids to cuda psudoBoid* deviceBoidArray; err = hipMalloc((void**)&deviceBoidArray, BOID_MAX * sizeof(psudoBoid)); if (err != hipSuccess) { cerr << "GraphicsTemplate::_tmain - failed to allocate memory on device\n"; hipFree(deviceBoidArray); free(boidArray); cout << "errored\n"; hipDeviceReset(); return -1; } err = hipMemcpy(deviceBoidArray, boidArray, BOID_MAX * sizeof(psudoBoid), hipMemcpyHostToDevice); if (err != hipSuccess) { cerr << "GraphicsTemplate::_tmain - failed to copy memory to device\n"; hipFree(deviceBoidArray); free(boidArray); cout << "errored\n"; hipDeviceReset(); return -1; } // loopCount is a normal variable, no need to hipMalloc and CudaMemcpy // run kernel //std::cout << "Simulating boids\n"; high_resolution_clock::time_point t1 = high_resolution_clock::now(); cudaBoidUpdate << <numberOfBlocks, numberOfThreadsPerBlock>> >(deviceBoidArray, loopCount, BOID_MAX); // launch the GPU kernel err = hipGetLastError(); if (err != hipSuccess) { cerr << "GraphicsTemplate::_tmain - failed to launch kernel: " << hipGetErrorString(err) << "\n"; hipFree(deviceBoidArray); free(boidArray); cout << "errored\n"; hipDeviceReset(); return -1; } // wait for GPU to finish err = hipDeviceSynchronize(); high_resolution_clock::time_point t2 = high_resolution_clock::now(); if (err != hipSuccess) { cerr << "GraphicsTemplate::_tmain - cudaDeviceSync returned " << err << " errorString = " << hipGetErrorString(err) << "\n"; hipFree(deviceBoidArray); free(boidArray); cout << "errored\n"; hipDeviceReset(); return -1; } // test did not error, calculate time taken and printout duration<double> time_span = duration_cast<duration<double>>(t2 - t1); cout << time_span.count() << "\n"; // all ok, cleanup and exit hipFree(deviceBoidArray); //cout << "all done\n"; free(boidArray); std::cerr << "all ok\n"; // only here so that the error cvs file is easier to read hipDeviceReset(); return 0; }
3da75fcacafb9c09db53cbb7a94271e37cb679ea.cu
// GraphicsTemplate.cpp // ////////////////////////////////////////////////////////////////////////////////////////// // includes ////////////////////////////////////////////////////////////////////////////////////////// #include <iostream> using std::cout; using std::cerr; #include <tchar.h> #include <windows.h> #include "globals.cuh" #include <vector> using std::vector; #include "NA_MathsLib.cuh" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> //http://www.cplusplus.com/reference/chrono/high_resolution_clock/now/ #include <ctime> #include <ratio> #include <chrono> using namespace std::chrono; //http://www.cplusplus.com/reference/string/stoi/ #include <string> ///////////////////////////////////////////////////////////////////////// // CUDA Main ///////////////////////////////////////////////////////////////////////// //general notes #define CUDA_CALL(x) { const cudaError_t a = (x); if (a != cudaSuccess) {printf("\nCUDA Error: %s (err_num=%d) \n",cudaGetErrorScring(a),a); cudaDeviceReset(); assert(0);}} // Shane Cook - CUDA Programming A Developer's Guide to Parallel Computing with GPUs ISBN:978-0-12-415933-4 P67 //cudaDeviceSynchronize(); //have the CPU wait for the GPU //cudaKernal<<<num_blocks, num_threads>>>(args ...); // running the kernal on the GPU struct psudoVector2 { float x; float y; }; struct psudoBoid { psudoVector2 position; psudoVector2 currentVelocity; }; __global__ void cudaBoidUpdate(psudoBoid* globalBoidArray, int loopCount, const int BOID_MAX) { bool debug = false; bool allThreadsDebug = false; int selfIndex = (int)threadIdx.x; // slightly more readable and means less casting if ((selfIndex == 0 && debug) || allThreadsDebug) printf("kernel launched\n"); // create shared array for communicating between threads/boids __shared__ psudoBoid* sharedBoidArray; if(selfIndex == 0) sharedBoidArray = (psudoBoid*)malloc(BOID_MAX * sizeof(psudoBoid)); __syncthreads(); sharedBoidArray[selfIndex] = globalBoidArray[selfIndex]; // every boid copies it own data into the shared memory if ((selfIndex ==0 && debug) || allThreadsDebug) printf("init complete\n"); psudoBoid* localBoidArray = (psudoBoid*)malloc(BOID_MAX * sizeof(psudoBoid)); // each thread has its own cache of the shared array, this is likely the cause of 551 boid limit int* nearbyBoidIndexer = (int*)malloc(BOID_MAX * sizeof(int)); //save memory while creating short list with a trick for (int loop = 0; loop < loopCount; loop++) { if ((selfIndex ==0 && debug) || allThreadsDebug) printf("beginning loop %d\n", loop); // rebuild localBoidArray cache // starting at own boid, copy data into own memory int i = selfIndex; for (int j = 0; j < BOID_MAX; j++) { // actual copy localBoidArray[i] = sharedBoidArray[i]; i++; // next boid if (i == BOID_MAX) // wrap arround when walking off memeory i = 0; } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("local cache rebuilt\n"); // find which boids are in range int nearbyBoidIndexSize = 0; for (int i = 0; i < BOID_MAX; i++) { // CPU version had a bug here //if (i == selfIndex) //{ //skip //} //else //{ psudoVector2 temp; temp.x = localBoidArray[i].position.x - localBoidArray[selfIndex].position.x; temp.y = localBoidArray[i].position.y - localBoidArray[selfIndex].position.y; int tempLength = sqrt(temp.x*temp.x + temp.y*temp.y); if (tempLength < BIOD_SIGHT_RANGE) // if the boid is close enough to be seen then add it to the nearby list { nearbyBoidIndexer[nearbyBoidIndexSize] = i; 
nearbyBoidIndexSize++; } //} } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("sort list made\n"); // alightment psudoVector2 sumVelocity; sumVelocity.x = 0; sumVelocity.y = 0; // nearbyBoidIndexer trick in action for (int i = 0; i < nearbyBoidIndexSize; i++) { sumVelocity.x += localBoidArray[nearbyBoidIndexer[i]].currentVelocity.x; sumVelocity.y += localBoidArray[nearbyBoidIndexer[i]].currentVelocity.y; } // convert to average sumVelocity.x = sumVelocity.x / nearbyBoidIndexSize; sumVelocity.y = sumVelocity.y / nearbyBoidIndexSize; psudoVector2 newVelocity = sumVelocity; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("alignment found\n"); // cohesion psudoVector2 sumPosition; sumPosition.x = 0; sumPosition.y = 0; for (int i = 0; i < nearbyBoidIndexSize; i++) // just realised I could combine this loop with the previous one // keeping them seperate to maintain readability { sumPosition.x += localBoidArray[nearbyBoidIndexer[i]].position.x; sumPosition.y += localBoidArray[nearbyBoidIndexer[i]].position.y; } // convert to average sumPosition.x = sumPosition.x / nearbyBoidIndexSize; sumPosition.y = sumPosition.y / nearbyBoidIndexSize; psudoVector2 temp; temp.x = localBoidArray[selfIndex].position.x - sumPosition.x; temp.y = localBoidArray[selfIndex].position.y - sumPosition.y; temp.x *= BOID_COHESION_WEIGHTING; temp.y *= BOID_COHESION_WEIGHTING; // modify velocity to head towards the average position newVelocity.x += temp.x; newVelocity.y += temp.y; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("cohesion done\n"); // seperation for (int i = 0; i < nearbyBoidIndexSize; i++) // another for loop that could be merged? { if (nearbyBoidIndexer[i] != selfIndex) // skip self { psudoVector2 temp; temp.x = localBoidArray[selfIndex].position.x - localBoidArray[i].position.x; temp.y = localBoidArray[selfIndex].position.y - localBoidArray[i].position.y; int tempLength = sqrt(temp.x*temp.x + temp.y*temp.y); if (tempLength < BOID_RESPECT_DIST) { newVelocity = temp; } } } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("seperation done\n"); // STUFF FROM CPU POST UPDATE METHOD // enforce rotation limit // commented out due to bug in NA_Vector::clockwiseAngle - it doesn't give a different value when you mesure from the other vector. 
Thiss means that the CPU version had this bug // also porting this to cuda would have made ugly code /*float newVelocityCurrentVelocityClockwiseAngle; //this is going to get messy - missing my vector library now float newVelocityLenSq = newVelocity.x*newVelocity.x + newVelocity.y*newVelocity.y; // I could possibly do some #defines for readability float currentVelocityLenSq = currentVelocity.x*currentVelocity.x + currentVelocity.y*currentVelocity.y; float dotProduct = newVelocity.x*currentVelocity.x + newVelocity.y*currentVelocity.y; newVelocityCurrentVelocityClockwiseAngle = acos(dotProduct / sqrt(newVelocityLenSq * currentVelocityLenSq)); float currentVelocityNewVelocityCClockwiseAngle; //there is a difference, the velocities are swapped float newVelocityLenSq = newVelocity.x*newVelocity.x + newVelocity.y*newVelocity.y; // I could possibly do some #defines for readability float currentVelocityLenSq = currentVelocity.x*currentVelocity.x + currentVelocity.y*currentVelocity.y; float dotProduct = newVelocity.x*currentVelocity.x + newVelocity.y*currentVelocity.y; newVelocityCurrentVelocityClockwiseAngle = acos(dotProduct / sqrt(newVelocityLenSq * currentVelocityLenSq)); if (newVelocityCurrentVelocityClockwiseAngle > BOID_ROTATE_MAX && currentVelocityNewVelocityCClockwiseAngle > BOID_ROTATE_MAX) { if (newVelocityCurrentVelocityClockwiseAngle < currentVelocityNewVelocityCClockwiseAngle)//clockwise or counterclockwise? { NA_Matrix r = NA_Matrix(NA_Matrix::types::rotateZ, BOID_ROTATE_MAX); newVelocity = r.matrixXvector(newVelocity); } else { NA_Matrix r = NA_Matrix(NA_Matrix::types::rotateZ, -BOID_ROTATE_MAX); newVelocity = r.matrixXvector(newVelocity); } }*/ // enforce speed limit if ((selfIndex ==0 && debug) || allThreadsDebug) printf("enforcing the speed limit\n"); float l = sqrt(newVelocity.x*newVelocity.x + newVelocity.y*newVelocity.y); if (l > BOID_SPEED_MAX); { // normalise and then scale newVelocity.x = (newVelocity.x / l)*BOID_SPEED_MAX; // I occasionally had NaN in boid data, I suspect that is was caused here from when the boid filtered itself out of the nearbyBoidIndexer newVelocity.y = (newVelocity.y / l)*BOID_SPEED_MAX; } if ((selfIndex ==0 && debug) || allThreadsDebug) printf("obaying the speed limit\n"); // update position with velocity localBoidArray[selfIndex].currentVelocity = newVelocity; localBoidArray[selfIndex].position.x += newVelocity.x; localBoidArray[selfIndex].position.y += newVelocity.y; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("updated local cache\n"); // screen wrap if (localBoidArray[selfIndex].position.x < 0) localBoidArray[selfIndex].position.x += SCREEN_WIDTH; if (localBoidArray[selfIndex].position.x > SCREEN_WIDTH) localBoidArray[selfIndex].position.x -= SCREEN_WIDTH; if (localBoidArray[selfIndex].position.y < 0) localBoidArray[selfIndex].position.y += SCREEN_HEIGHT; if (localBoidArray[selfIndex].position.y > SCREEN_HEIGHT) localBoidArray[selfIndex].position.y -= SCREEN_HEIGHT; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("staying within the world\n"); if ((selfIndex ==0 && debug) || allThreadsDebug) printf("waiting for everyone\n"); __syncthreads(); // update shared data sharedBoidArray[selfIndex] = localBoidArray[selfIndex]; if ((selfIndex ==0 && debug) || allThreadsDebug) printf("updated shared info\n"); //TODO: cuda/opengl interop render // wait for all threads (get ready for next round) if ((selfIndex ==0 && debug) || allThreadsDebug) printf("waiting for next loop\n"); __syncthreads(); } // threads cleanup their local stuff 
free(nearbyBoidIndexer); free(localBoidArray); // put stuff back in global memory so that CPU can collect it if wanted globalBoidArray[selfIndex] = sharedBoidArray[selfIndex]; __syncthreads(); if (selfIndex == 0) free(sharedBoidArray); } int _tmain(int argc, _TCHAR* argv[]) { int loopCount; if (argc != 3) { std::cerr << "usage: " << argv[0] << " <boidCount> <loopCount> \n"; cout << "errored\n"; return -1; } else { // convert to ints, not sure what happens if can't BOID_MAX = std::stoi(argv[1], NULL); //http://www.cplusplus.com/reference/string/stoi/ loopCount = std::stoi(argv[2], NULL); } const int numberOfBlocks = 1; const int numberOfThreadsPerBlock = BOID_MAX; // expect errors with over 1024 boids // set up cuda cudaError err = cudaSetDevice(0); cudaDeviceReset(); if (err != cudaSuccess) { cerr << "GraphicsTemplate::_tmain - failed to set device\n"; cout << "errored\n"; cudaDeviceReset(); return -1; } // make all boids extern NA_MathsLib na_maths; na_maths.seedDice(0); // fixed seed matches CPU version psudoBoid* boidArray = (psudoBoid*) malloc(BOID_MAX * sizeof(psudoBoid)); //psudoBoid boidArray[BOID_MAX]; //set initual values for (int i = 0; i < BOID_MAX; i++) { boidArray[i].position.x = na_maths.dice(SCREEN_WIDTH); boidArray[i].position.y = na_maths.dice(SCREEN_HEIGHT); boidArray[i].currentVelocity.x = float(na_maths.dice(-100, 100)) / 100.0f; boidArray[i].currentVelocity.y = float(na_maths.dice(-100, 100)) / 100.0f; } // tell cuda to allocate space for boids and copy boids to cuda psudoBoid* deviceBoidArray; err = cudaMalloc((void**)&deviceBoidArray, BOID_MAX * sizeof(psudoBoid)); if (err != cudaSuccess) { cerr << "GraphicsTemplate::_tmain - failed to allocate memory on device\n"; cudaFree(deviceBoidArray); free(boidArray); cout << "errored\n"; cudaDeviceReset(); return -1; } err = cudaMemcpy(deviceBoidArray, boidArray, BOID_MAX * sizeof(psudoBoid), cudaMemcpyHostToDevice); if (err != cudaSuccess) { cerr << "GraphicsTemplate::_tmain - failed to copy memory to device\n"; cudaFree(deviceBoidArray); free(boidArray); cout << "errored\n"; cudaDeviceReset(); return -1; } // loopCount is a normal variable, no need to cudaMalloc and CudaMemcpy // run kernel //std::cout << "Simulating boids\n"; high_resolution_clock::time_point t1 = high_resolution_clock::now(); cudaBoidUpdate << <numberOfBlocks, numberOfThreadsPerBlock>> >(deviceBoidArray, loopCount, BOID_MAX); // launch the GPU kernel err = cudaGetLastError(); if (err != cudaSuccess) { cerr << "GraphicsTemplate::_tmain - failed to launch kernel: " << cudaGetErrorString(err) << "\n"; cudaFree(deviceBoidArray); free(boidArray); cout << "errored\n"; cudaDeviceReset(); return -1; } // wait for GPU to finish err = cudaDeviceSynchronize(); high_resolution_clock::time_point t2 = high_resolution_clock::now(); if (err != cudaSuccess) { cerr << "GraphicsTemplate::_tmain - cudaDeviceSync returned " << err << " errorString = " << cudaGetErrorString(err) << "\n"; cudaFree(deviceBoidArray); free(boidArray); cout << "errored\n"; cudaDeviceReset(); return -1; } // test did not error, calculate time taken and printout duration<double> time_span = duration_cast<duration<double>>(t2 - t1); cout << time_span.count() << "\n"; // all ok, cleanup and exit cudaFree(deviceBoidArray); //cout << "all done\n"; free(boidArray); std::cerr << "all ok\n"; // only here so that the error cvs file is easier to read cudaDeviceReset(); return 0; }
bcea0d508348f4b19e25653a0c4974b9521fc4f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_SLICE_LAYER_INSTANTIATE #include "lbann/layers/transform/slice.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { using dim4 = gpu_lib::array<size_t, 4>; /** * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (max_input_dims[3] / bsize) x max_input_dims[2] x max_input_dims[1] */ template <typename T> __global__ void concat4d_kernel( size_t num_inputs, const T* __restrict__ * __restrict__ input_buffer_list, const dim4* __restrict__ input_dims_list, const dim4* __restrict__ input_strides_list, T* __restrict__ output_buffer, dim4 output_strides, const size_t* __restrict__ output_offset_list) { // Indices const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = gridDim.x * blockDim.x; const size_t nthreadsy = gridDim.y * blockDim.y; const size_t nthreadsz = gridDim.z * blockDim.z; for (size_t j=0; j<num_inputs; ++j) { // Current input tensor const auto& input_buffer = input_buffer_list[j]; const auto& input_dims = input_dims_list[j]; const auto& input_strides = input_strides_list[j]; const auto& output_offset = output_offset_list[j]; // Copy from input tensor to output tensor for (size_t i0=0; i0<input_dims[0]; ++i0) { for (size_t i1=gidz; i1<input_dims[1]; i1+=nthreadsz) { for (size_t i2=gidy; i2<input_dims[2]; i2+=nthreadsy) { for (size_t i3=gidx; i3<input_dims[3]; i3+=nthreadsx) { const auto& x = input_buffer[i0 * input_strides[0] + i1 * input_strides[1] + i2 * input_strides[2] + i3 * input_strides[3]]; auto& y = output_buffer[output_offset + i0 * output_strides[0] + i1 * output_strides[1] + i2 * output_strides[2] + i3 * output_strides[3]]; y = x; } } } } } } /** * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (max_output_dims[3] / bsize) x max_output_dims[2] x max_output_dims[1] * */ template <typename T> __global__ void slice4d_kernel( size_t num_outputs, const T* __restrict__ input_buffer, dim4 input_strides, const size_t* __restrict__ input_offset_list, T* __restrict__ * __restrict__ output_buffer_list, const dim4* __restrict__ output_dims_list, const dim4* __restrict__ output_strides_list) { // Indices const size_t gidx = threadIdx.x + blockIdx.x * 
blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = gridDim.x * blockDim.x; const size_t nthreadsy = gridDim.y * blockDim.y; const size_t nthreadsz = gridDim.z * blockDim.z; for (size_t j=0; j<num_outputs; ++j) { // Current output tensor const auto& input_offset = input_offset_list[j]; auto& output_buffer = output_buffer_list[j]; const auto& output_dims = output_dims_list[j]; const auto& output_strides = output_strides_list[j]; // Copy from input tensor to output tensor for (size_t i0=0; i0<output_dims[0]; ++i0) { for (size_t i1=gidz; i1<output_dims[1]; i1+=nthreadsz) { for (size_t i2=gidy; i2<output_dims[2]; i2+=nthreadsy) { for (size_t i3=gidx; i3<output_dims[3]; i3+=nthreadsx) { const auto& x = input_buffer[input_offset + i0 * input_strides[0] + i1 * input_strides[1] + i2 * input_strides[2] + i3 * input_strides[3]]; auto& y = output_buffer[i0 * output_strides[0] + i1 * output_strides[1] + i2 * output_strides[2] + i3 * output_strides[3]]; y = x; } } } } } } } // namespace <anon> template <typename TensorDataType> void fp_compute_impl( slice_layer<TensorDataType,data_layout::MODEL_PARALLEL,El::Device::GPU>& l) { // Tensor views have already been setup in fp_setup_outputs } template <typename TensorDataType> void bp_compute_impl( slice_layer<TensorDataType,data_layout::MODEL_PARALLEL,El::Device::GPU>& l) { // Stack Elemental matrices on top of each other // Note: Assume each mini-batch sample is flat. auto& input_grad = l.get_error_signals(); std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> input_grad_v( input_grad.Construct(input_grad.Grid(), input_grad.Root())); size_t offset = l.m_slice_points.front(); for (size_t j=0; j<static_cast<size_t>(l.get_num_children()); ++j) { const auto& output_grad = l.get_prev_error_signals(j); El::View(*input_grad_v, input_grad, El::IR(offset, offset+output_grad.Height()), El::ALL); El::Copy(output_grad, *input_grad_v); offset += output_grad.Height(); } } template <typename TensorDataType> void fp_compute_impl( slice_layer<TensorDataType,data_layout::DATA_PARALLEL,El::Device::GPU>& l) { // Check that number of dimensions is valid /// @todo Support tensors with arbitrary number of dimensions const auto& input_dims = l.get_input_dims(); const size_t num_dims = input_dims.size(); if (num_dims > 3) { LBANN_ERROR(l.get_type()," layer \"",l.get_name(),"\" ", "is operating on ",num_dims,"-D tensors, ", "but only 3-D tensors are currently supported"); } // Get synchronization info from input tensor using LocalMatrix = El::Matrix<TensorDataType, El::Device::GPU>; const auto& input = l.get_prev_activations(); const auto& local_input = dynamic_cast<const LocalMatrix&>(input.LockedMatrix()); auto sync_info = gpu::get_sync_info(local_input); // Get dimensions and strides for each output tensor const size_t num_outputs = l.get_num_children(); std::vector<TensorDataType*> output_buffer_list; std::vector<dim4> output_dims_list, output_strides_list; dim4 max_output_dims{0,0,0,0}; for (size_t j=0; j<num_outputs; ++j) { auto& output = l.get_activations(j); const auto& output_dims = l.get_output_dims(j); // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. 
std::vector<size_t> rdims(output_dims.rbegin(), output_dims.rend()); std::vector<size_t> rstrides(output_dims.size(), 1); for (size_t d=1; d<output_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(output.LocalWidth()); rstrides.push_back(output.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); output_buffer_list.push_back(output.Buffer()); output_dims_list.push_back({rdims[3], rdims[2], rdims[1], rdims[0]}); output_strides_list.push_back( {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}); for (size_t i=0; i<4; ++i) { max_output_dims[i] = ::max(max_output_dims[i], rdims[3-i]); } } // Get strides for input tensor dim4 input_strides; { // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. std::vector<size_t> rdims(input_dims.rbegin(), input_dims.rend()); std::vector<size_t> rstrides(input_dims.size(), 1); for (size_t d=1; d<input_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(local_input.Width()); rstrides.push_back(local_input.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); input_strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}; } // Compute each output tensor's offset in input tensor const size_t slice_dim_stride = input_strides[l.m_slice_dim+(4-num_dims)]; std::vector<size_t> input_offset_list; for (const auto& slice_point : l.m_slice_points) { input_offset_list.push_back(slice_point * slice_dim_stride); } // Pack tensor data into a CPU buffer l.m_workspace_event.synchronize(); l.m_workspace.resize( sizeof(size_t) * input_offset_list.size() + sizeof(TensorDataType*) * output_buffer_list.size() + sizeof(dim4) * output_dims_list.size() + sizeof(dim4) * output_strides_list.size()); size_t pos = 0; std::memcpy(&l.m_workspace[pos], input_offset_list.data(), sizeof(size_t) * input_offset_list.size()); pos += sizeof(size_t) * input_offset_list.size(); std::memcpy(&l.m_workspace[pos], output_buffer_list.data(), sizeof(TensorDataType*) * output_buffer_list.size()); pos += sizeof(TensorDataType*) * output_buffer_list.size(); std::memcpy(&l.m_workspace[pos], output_dims_list.data(), sizeof(dim4) * output_dims_list.size()); pos += sizeof(dim4) * output_dims_list.size(); std::memcpy(&l.m_workspace[pos], output_strides_list.data(), sizeof(dim4) * output_strides_list.size()); pos += sizeof(dim4) * output_strides_list.size(); // Copy tensor data to GPU hydrogen::simple_buffer<unsigned char, El::Device::GPU> device_workspace( l.m_workspace.size(), sync_info); unsigned char* device_workspace_ptr = device_workspace.data(); hydrogen::gpu::Copy1DToDevice(l.m_workspace.data(), device_workspace_ptr, l.m_workspace.size(), sync_info); l.m_workspace_event.record(sync_info.Stream()); pos = 0; auto&& device_input_offset_list = reinterpret_cast<const size_t*>(device_workspace_ptr+pos); pos += sizeof(size_t) * input_offset_list.size(); auto&& device_output_buffer_list = reinterpret_cast<TensorDataType**>(device_workspace_ptr+pos); pos += sizeof(TensorDataType*) * output_buffer_list.size(); auto&& device_output_dims_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_dims_list.size(); auto&& device_output_strides_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_strides_list.size(); // Launch GPU kernel const auto& max_output_size = (max_output_dims[0] * max_output_dims[1] * max_output_dims[2] * 
max_output_dims[3]); if (max_output_size > 0) { constexpr size_t block_size = 64; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (max_output_dims[3] + block_size - 1) / block_size; grid_dims.y = max_output_dims[2]; grid_dims.z = max_output_dims[1]; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( slice4d_kernel<TensorDataType>, grid_dims, block_dims, 0, sync_info, num_outputs, local_input.LockedBuffer(), input_strides, device_input_offset_list, device_output_buffer_list, device_output_dims_list, device_output_strides_list); } } template <typename TensorDataType> void bp_compute_impl( slice_layer<TensorDataType,data_layout::DATA_PARALLEL,El::Device::GPU>& l) { // Check that number of dimensions is valid /// @todo Support tensors with arbitrary number of dimensions const auto& input_dims = l.get_input_dims(); const size_t num_dims = input_dims.size(); if (num_dims > 3) { LBANN_ERROR(l.get_type()," layer \"",l.get_name(),"\" ", "is operating on ",num_dims,"-D tensors, ", "but only 3-D tensors are currently supported"); } // Get synchronization info from input gradient tensor using LocalMatrix = El::Matrix<TensorDataType, El::Device::GPU>; auto& input_grad = l.get_error_signals(); auto& local_input_grad = dynamic_cast<LocalMatrix&>(input_grad.Matrix()); auto sync_info = gpu::get_sync_info(local_input_grad); // Get dimensions and strides for each output gradient tensor const size_t num_outputs = l.get_num_children(); std::vector<const TensorDataType*> output_grad_buffer_list; std::vector<dim4> output_grad_dims_list, output_grad_strides_list; dim4 max_output_grad_dims{0,0,0,0}; for (size_t j=0; j<num_outputs; ++j) { const auto& output_grad = l.get_prev_error_signals(j); const auto& output_grad_dims = l.get_output_dims(j); // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. std::vector<size_t> rdims(output_grad_dims.rbegin(), output_grad_dims.rend()); std::vector<size_t> rstrides(output_grad_dims.size(), 1); for (size_t d=1; d<output_grad_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(output_grad.LocalWidth()); rstrides.push_back(output_grad.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); output_grad_buffer_list.push_back(output_grad.LockedBuffer()); output_grad_dims_list.push_back({rdims[3], rdims[2], rdims[1], rdims[0]}); output_grad_strides_list.push_back( {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}); for (size_t i=0; i<4; ++i) { max_output_grad_dims[i] = ::max(max_output_grad_dims[i], rdims[3-i]); } } // Get strides for input gradient tensor dim4 input_grad_strides; { // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. 
std::vector<size_t> rdims(input_dims.rbegin(), input_dims.rend()); std::vector<size_t> rstrides(input_dims.size(), 1); for (size_t d=1; d<input_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(local_input_grad.Width()); rstrides.push_back(local_input_grad.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); input_grad_strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}; } // Compute offsets in input gradient tensor const size_t slice_dim_stride = input_grad_strides[l.m_slice_dim+(4-num_dims)]; std::vector<size_t> input_grad_offset_list; for (const auto& slice_point : l.m_slice_points) { input_grad_offset_list.push_back(slice_point * slice_dim_stride); } // Pack tensor data into a CPU buffer l.m_workspace_event.synchronize(); l.m_workspace.resize( sizeof(TensorDataType*) * output_grad_buffer_list.size() + sizeof(dim4) * output_grad_dims_list.size() + sizeof(dim4) * output_grad_strides_list.size() + sizeof(size_t) * input_grad_offset_list.size()); size_t pos = 0; std::memcpy(&l.m_workspace[pos], output_grad_buffer_list.data(), sizeof(TensorDataType*) * output_grad_buffer_list.size()); pos += sizeof(TensorDataType*) * output_grad_buffer_list.size(); std::memcpy(&l.m_workspace[pos], output_grad_dims_list.data(), sizeof(dim4) * output_grad_dims_list.size()); pos += sizeof(dim4) * output_grad_dims_list.size(); std::memcpy(&l.m_workspace[pos], output_grad_strides_list.data(), sizeof(dim4) * output_grad_strides_list.size()); pos += sizeof(dim4) * output_grad_strides_list.size(); std::memcpy(&l.m_workspace[pos], input_grad_offset_list.data(), sizeof(size_t) * input_grad_offset_list.size()); pos += sizeof(size_t) * input_grad_offset_list.size(); // Copy tensor data to GPU hydrogen::simple_buffer<unsigned char, El::Device::GPU> device_workspace( l.m_workspace.size(), sync_info); unsigned char* device_workspace_ptr = device_workspace.data(); hydrogen::gpu::Copy1DToDevice(l.m_workspace.data(), device_workspace_ptr, l.m_workspace.size(), sync_info); l.m_workspace_event.record(sync_info.Stream()); pos = 0; auto&& device_output_grad_buffer_list = reinterpret_cast<const TensorDataType**>(device_workspace_ptr+pos); pos += sizeof(TensorDataType*) * output_grad_buffer_list.size(); auto&& device_output_grad_dims_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_grad_dims_list.size(); auto&& device_output_grad_strides_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_grad_strides_list.size(); auto&& device_input_grad_offset_list = reinterpret_cast<const size_t*>(device_workspace_ptr+pos); pos += sizeof(size_t) * input_grad_offset_list.size(); // Launch GPU kernel const auto& max_output_grad_size = (max_output_grad_dims[0] * max_output_grad_dims[1] * max_output_grad_dims[2] * max_output_grad_dims[3]); if (max_output_grad_size > 0) { constexpr size_t block_size = 64; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (max_output_grad_dims[3] + block_size - 1) / block_size; grid_dims.y = max_output_grad_dims[2]; grid_dims.z = max_output_grad_dims[1]; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( concat4d_kernel<TensorDataType>, grid_dims, block_dims, 0, sync_info, num_outputs, device_output_grad_buffer_list, device_output_grad_dims_list, device_output_grad_strides_list, local_input_grad.Buffer(), input_grad_strides, device_input_grad_offset_list); } } // Explicit instantiation #define PROTO(T) \ template 
class slice_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class slice_layer< \ T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
bcea0d508348f4b19e25653a0c4974b9521fc4f8.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_SLICE_LAYER_INSTANTIATE #include "lbann/layers/transform/slice.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { using dim4 = gpu_lib::array<size_t, 4>; /** * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (max_input_dims[3] / bsize) x max_input_dims[2] x max_input_dims[1] */ template <typename T> __global__ void concat4d_kernel( size_t num_inputs, const T* __restrict__ * __restrict__ input_buffer_list, const dim4* __restrict__ input_dims_list, const dim4* __restrict__ input_strides_list, T* __restrict__ output_buffer, dim4 output_strides, const size_t* __restrict__ output_offset_list) { // Indices const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = gridDim.x * blockDim.x; const size_t nthreadsy = gridDim.y * blockDim.y; const size_t nthreadsz = gridDim.z * blockDim.z; for (size_t j=0; j<num_inputs; ++j) { // Current input tensor const auto& input_buffer = input_buffer_list[j]; const auto& input_dims = input_dims_list[j]; const auto& input_strides = input_strides_list[j]; const auto& output_offset = output_offset_list[j]; // Copy from input tensor to output tensor for (size_t i0=0; i0<input_dims[0]; ++i0) { for (size_t i1=gidz; i1<input_dims[1]; i1+=nthreadsz) { for (size_t i2=gidy; i2<input_dims[2]; i2+=nthreadsy) { for (size_t i3=gidx; i3<input_dims[3]; i3+=nthreadsx) { const auto& x = input_buffer[i0 * input_strides[0] + i1 * input_strides[1] + i2 * input_strides[2] + i3 * input_strides[3]]; auto& y = output_buffer[output_offset + i0 * output_strides[0] + i1 * output_strides[1] + i2 * output_strides[2] + i3 * output_strides[3]]; y = x; } } } } } } /** * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (max_output_dims[3] / bsize) x max_output_dims[2] x max_output_dims[1] * */ template <typename T> __global__ void slice4d_kernel( size_t num_outputs, const T* __restrict__ input_buffer, dim4 input_strides, const size_t* __restrict__ input_offset_list, T* __restrict__ * __restrict__ output_buffer_list, const dim4* __restrict__ output_dims_list, const dim4* __restrict__ output_strides_list) { // Indices const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t gidz 
= threadIdx.z + blockIdx.z * blockDim.z; const size_t nthreadsx = gridDim.x * blockDim.x; const size_t nthreadsy = gridDim.y * blockDim.y; const size_t nthreadsz = gridDim.z * blockDim.z; for (size_t j=0; j<num_outputs; ++j) { // Current output tensor const auto& input_offset = input_offset_list[j]; auto& output_buffer = output_buffer_list[j]; const auto& output_dims = output_dims_list[j]; const auto& output_strides = output_strides_list[j]; // Copy from input tensor to output tensor for (size_t i0=0; i0<output_dims[0]; ++i0) { for (size_t i1=gidz; i1<output_dims[1]; i1+=nthreadsz) { for (size_t i2=gidy; i2<output_dims[2]; i2+=nthreadsy) { for (size_t i3=gidx; i3<output_dims[3]; i3+=nthreadsx) { const auto& x = input_buffer[input_offset + i0 * input_strides[0] + i1 * input_strides[1] + i2 * input_strides[2] + i3 * input_strides[3]]; auto& y = output_buffer[i0 * output_strides[0] + i1 * output_strides[1] + i2 * output_strides[2] + i3 * output_strides[3]]; y = x; } } } } } } } // namespace <anon> template <typename TensorDataType> void fp_compute_impl( slice_layer<TensorDataType,data_layout::MODEL_PARALLEL,El::Device::GPU>& l) { // Tensor views have already been setup in fp_setup_outputs } template <typename TensorDataType> void bp_compute_impl( slice_layer<TensorDataType,data_layout::MODEL_PARALLEL,El::Device::GPU>& l) { // Stack Elemental matrices on top of each other // Note: Assume each mini-batch sample is flat. auto& input_grad = l.get_error_signals(); std::unique_ptr<El::AbstractDistMatrix<TensorDataType>> input_grad_v( input_grad.Construct(input_grad.Grid(), input_grad.Root())); size_t offset = l.m_slice_points.front(); for (size_t j=0; j<static_cast<size_t>(l.get_num_children()); ++j) { const auto& output_grad = l.get_prev_error_signals(j); El::View(*input_grad_v, input_grad, El::IR(offset, offset+output_grad.Height()), El::ALL); El::Copy(output_grad, *input_grad_v); offset += output_grad.Height(); } } template <typename TensorDataType> void fp_compute_impl( slice_layer<TensorDataType,data_layout::DATA_PARALLEL,El::Device::GPU>& l) { // Check that number of dimensions is valid /// @todo Support tensors with arbitrary number of dimensions const auto& input_dims = l.get_input_dims(); const size_t num_dims = input_dims.size(); if (num_dims > 3) { LBANN_ERROR(l.get_type()," layer \"",l.get_name(),"\" ", "is operating on ",num_dims,"-D tensors, ", "but only 3-D tensors are currently supported"); } // Get synchronization info from input tensor using LocalMatrix = El::Matrix<TensorDataType, El::Device::GPU>; const auto& input = l.get_prev_activations(); const auto& local_input = dynamic_cast<const LocalMatrix&>(input.LockedMatrix()); auto sync_info = gpu::get_sync_info(local_input); // Get dimensions and strides for each output tensor const size_t num_outputs = l.get_num_children(); std::vector<TensorDataType*> output_buffer_list; std::vector<dim4> output_dims_list, output_strides_list; dim4 max_output_dims{0,0,0,0}; for (size_t j=0; j<num_outputs; ++j) { auto& output = l.get_activations(j); const auto& output_dims = l.get_output_dims(j); // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. 
std::vector<size_t> rdims(output_dims.rbegin(), output_dims.rend()); std::vector<size_t> rstrides(output_dims.size(), 1); for (size_t d=1; d<output_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(output.LocalWidth()); rstrides.push_back(output.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); output_buffer_list.push_back(output.Buffer()); output_dims_list.push_back({rdims[3], rdims[2], rdims[1], rdims[0]}); output_strides_list.push_back( {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}); for (size_t i=0; i<4; ++i) { max_output_dims[i] = std::max(max_output_dims[i], rdims[3-i]); } } // Get strides for input tensor dim4 input_strides; { // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. std::vector<size_t> rdims(input_dims.rbegin(), input_dims.rend()); std::vector<size_t> rstrides(input_dims.size(), 1); for (size_t d=1; d<input_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(local_input.Width()); rstrides.push_back(local_input.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); input_strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}; } // Compute each output tensor's offset in input tensor const size_t slice_dim_stride = input_strides[l.m_slice_dim+(4-num_dims)]; std::vector<size_t> input_offset_list; for (const auto& slice_point : l.m_slice_points) { input_offset_list.push_back(slice_point * slice_dim_stride); } // Pack tensor data into a CPU buffer l.m_workspace_event.synchronize(); l.m_workspace.resize( sizeof(size_t) * input_offset_list.size() + sizeof(TensorDataType*) * output_buffer_list.size() + sizeof(dim4) * output_dims_list.size() + sizeof(dim4) * output_strides_list.size()); size_t pos = 0; std::memcpy(&l.m_workspace[pos], input_offset_list.data(), sizeof(size_t) * input_offset_list.size()); pos += sizeof(size_t) * input_offset_list.size(); std::memcpy(&l.m_workspace[pos], output_buffer_list.data(), sizeof(TensorDataType*) * output_buffer_list.size()); pos += sizeof(TensorDataType*) * output_buffer_list.size(); std::memcpy(&l.m_workspace[pos], output_dims_list.data(), sizeof(dim4) * output_dims_list.size()); pos += sizeof(dim4) * output_dims_list.size(); std::memcpy(&l.m_workspace[pos], output_strides_list.data(), sizeof(dim4) * output_strides_list.size()); pos += sizeof(dim4) * output_strides_list.size(); // Copy tensor data to GPU hydrogen::simple_buffer<unsigned char, El::Device::GPU> device_workspace( l.m_workspace.size(), sync_info); unsigned char* device_workspace_ptr = device_workspace.data(); hydrogen::gpu::Copy1DToDevice(l.m_workspace.data(), device_workspace_ptr, l.m_workspace.size(), sync_info); l.m_workspace_event.record(sync_info.Stream()); pos = 0; auto&& device_input_offset_list = reinterpret_cast<const size_t*>(device_workspace_ptr+pos); pos += sizeof(size_t) * input_offset_list.size(); auto&& device_output_buffer_list = reinterpret_cast<TensorDataType**>(device_workspace_ptr+pos); pos += sizeof(TensorDataType*) * output_buffer_list.size(); auto&& device_output_dims_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_dims_list.size(); auto&& device_output_strides_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_strides_list.size(); // Launch GPU kernel const auto& max_output_size = (max_output_dims[0] * max_output_dims[1] * max_output_dims[2] * 
max_output_dims[3]); if (max_output_size > 0) { constexpr size_t block_size = 64; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (max_output_dims[3] + block_size - 1) / block_size; grid_dims.y = max_output_dims[2]; grid_dims.z = max_output_dims[1]; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( slice4d_kernel<TensorDataType>, grid_dims, block_dims, 0, sync_info, num_outputs, local_input.LockedBuffer(), input_strides, device_input_offset_list, device_output_buffer_list, device_output_dims_list, device_output_strides_list); } } template <typename TensorDataType> void bp_compute_impl( slice_layer<TensorDataType,data_layout::DATA_PARALLEL,El::Device::GPU>& l) { // Check that number of dimensions is valid /// @todo Support tensors with arbitrary number of dimensions const auto& input_dims = l.get_input_dims(); const size_t num_dims = input_dims.size(); if (num_dims > 3) { LBANN_ERROR(l.get_type()," layer \"",l.get_name(),"\" ", "is operating on ",num_dims,"-D tensors, ", "but only 3-D tensors are currently supported"); } // Get synchronization info from input gradient tensor using LocalMatrix = El::Matrix<TensorDataType, El::Device::GPU>; auto& input_grad = l.get_error_signals(); auto& local_input_grad = dynamic_cast<LocalMatrix&>(input_grad.Matrix()); auto sync_info = gpu::get_sync_info(local_input_grad); // Get dimensions and strides for each output gradient tensor const size_t num_outputs = l.get_num_children(); std::vector<const TensorDataType*> output_grad_buffer_list; std::vector<dim4> output_grad_dims_list, output_grad_strides_list; dim4 max_output_grad_dims{0,0,0,0}; for (size_t j=0; j<num_outputs; ++j) { const auto& output_grad = l.get_prev_error_signals(j); const auto& output_grad_dims = l.get_output_dims(j); // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. std::vector<size_t> rdims(output_grad_dims.rbegin(), output_grad_dims.rend()); std::vector<size_t> rstrides(output_grad_dims.size(), 1); for (size_t d=1; d<output_grad_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(output_grad.LocalWidth()); rstrides.push_back(output_grad.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); output_grad_buffer_list.push_back(output_grad.LockedBuffer()); output_grad_dims_list.push_back({rdims[3], rdims[2], rdims[1], rdims[0]}); output_grad_strides_list.push_back( {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}); for (size_t i=0; i<4; ++i) { max_output_grad_dims[i] = std::max(max_output_grad_dims[i], rdims[3-i]); } } // Get strides for input gradient tensor dim4 input_grad_strides; { // Construct dimensions and strides in reverse order // Note: Assume each mini-batch sample is fully packed. 
std::vector<size_t> rdims(input_dims.rbegin(), input_dims.rend()); std::vector<size_t> rstrides(input_dims.size(), 1); for (size_t d=1; d<input_dims.size(); ++d) { rstrides[d] = rdims[d-1] * rstrides[d-1]; } rdims.push_back(local_input_grad.Width()); rstrides.push_back(local_input_grad.LDim()); // Pad tensor dimensions to 4D rdims.resize(4, 1); rstrides.resize(4, rstrides.back()); input_grad_strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]}; } // Compute offsets in input gradient tensor const size_t slice_dim_stride = input_grad_strides[l.m_slice_dim+(4-num_dims)]; std::vector<size_t> input_grad_offset_list; for (const auto& slice_point : l.m_slice_points) { input_grad_offset_list.push_back(slice_point * slice_dim_stride); } // Pack tensor data into a CPU buffer l.m_workspace_event.synchronize(); l.m_workspace.resize( sizeof(TensorDataType*) * output_grad_buffer_list.size() + sizeof(dim4) * output_grad_dims_list.size() + sizeof(dim4) * output_grad_strides_list.size() + sizeof(size_t) * input_grad_offset_list.size()); size_t pos = 0; std::memcpy(&l.m_workspace[pos], output_grad_buffer_list.data(), sizeof(TensorDataType*) * output_grad_buffer_list.size()); pos += sizeof(TensorDataType*) * output_grad_buffer_list.size(); std::memcpy(&l.m_workspace[pos], output_grad_dims_list.data(), sizeof(dim4) * output_grad_dims_list.size()); pos += sizeof(dim4) * output_grad_dims_list.size(); std::memcpy(&l.m_workspace[pos], output_grad_strides_list.data(), sizeof(dim4) * output_grad_strides_list.size()); pos += sizeof(dim4) * output_grad_strides_list.size(); std::memcpy(&l.m_workspace[pos], input_grad_offset_list.data(), sizeof(size_t) * input_grad_offset_list.size()); pos += sizeof(size_t) * input_grad_offset_list.size(); // Copy tensor data to GPU hydrogen::simple_buffer<unsigned char, El::Device::GPU> device_workspace( l.m_workspace.size(), sync_info); unsigned char* device_workspace_ptr = device_workspace.data(); hydrogen::gpu::Copy1DToDevice(l.m_workspace.data(), device_workspace_ptr, l.m_workspace.size(), sync_info); l.m_workspace_event.record(sync_info.Stream()); pos = 0; auto&& device_output_grad_buffer_list = reinterpret_cast<const TensorDataType**>(device_workspace_ptr+pos); pos += sizeof(TensorDataType*) * output_grad_buffer_list.size(); auto&& device_output_grad_dims_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_grad_dims_list.size(); auto&& device_output_grad_strides_list = reinterpret_cast<const dim4*>(device_workspace_ptr+pos); pos += sizeof(dim4) * output_grad_strides_list.size(); auto&& device_input_grad_offset_list = reinterpret_cast<const size_t*>(device_workspace_ptr+pos); pos += sizeof(size_t) * input_grad_offset_list.size(); // Launch GPU kernel const auto& max_output_grad_size = (max_output_grad_dims[0] * max_output_grad_dims[1] * max_output_grad_dims[2] * max_output_grad_dims[3]); if (max_output_grad_size > 0) { constexpr size_t block_size = 64; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (max_output_grad_dims[3] + block_size - 1) / block_size; grid_dims.y = max_output_grad_dims[2]; grid_dims.z = max_output_grad_dims[1]; gpu_lib::clip_grid_dims(grid_dims); hydrogen::gpu::LaunchKernel( concat4d_kernel<TensorDataType>, grid_dims, block_dims, 0, sync_info, num_outputs, device_output_grad_buffer_list, device_output_grad_dims_list, device_output_grad_strides_list, local_input_grad.Buffer(), input_grad_strides, device_input_grad_offset_list); } } // Explicit instantiation #define PROTO(T) \ template 
class slice_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class slice_layer< \ T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
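A note on the stride bookkeeping used throughout the slice/concat code above: the forward and backward paths both build row-major strides in reverse order, append the mini-batch axis using the local matrix's LocalWidth()/LDim(), pad to four entries, and reverse the result into a dim4 whose index 0 is the sample axis and index 3 the fastest-varying axis. The host-side sketch below isolates that packing step so it can be checked in isolation; Dim4 and pack_dim4 are illustrative names standing in for gpu_lib::array<size_t,4>, not LBANN API.

#include <array>
#include <cstddef>
#include <vector>

using Dim4 = std::array<std::size_t, 4>;  // stand-in for gpu_lib::array<size_t,4> (assumption)

// Build (dims, strides) in the layout the 4-D kernels expect: index 0 is the
// mini-batch axis, index 3 is the fastest-varying (innermost) axis.
inline void pack_dim4(const std::vector<std::size_t>& tensor_dims,  // e.g. {C, H, W}
                      std::size_t local_width,                      // number of local samples
                      std::size_t ldim,                             // leading dimension of the local matrix
                      Dim4& dims, Dim4& strides) {
  // Reverse the dims and accumulate fully packed strides, fastest axis first.
  std::vector<std::size_t> rdims(tensor_dims.rbegin(), tensor_dims.rend());
  std::vector<std::size_t> rstrides(rdims.size(), 1);
  for (std::size_t d = 1; d < rdims.size(); ++d) {
    rstrides[d] = rdims[d - 1] * rstrides[d - 1];
  }
  rdims.push_back(local_width);   // sample axis
  rstrides.push_back(ldim);       // stride between samples
  rdims.resize(4, 1);             // pad to 4-D
  rstrides.resize(4, rstrides.back());
  dims    = {rdims[3], rdims[2], rdims[1], rdims[0]};
  strides = {rstrides[3], rstrides[2], rstrides[1], rstrides[0]};
}

With the arrays in this layout, the flat offset used inside both kernels is simply i0*strides[0] + i1*strides[1] + i2*strides[2] + i3*strides[3].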
00cc35a00e5d016483886f778cb6750bd0dd86c8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <memory.h> #include <stdio.h> #include <time.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hiprand/hiprand_kernel.h> #include <hip/device_functions.h> #define uint8 unsigned char #define uint32 unsigned long int #define SHA1_BLOCK_SIZE 20 typedef struct { uint8 data[64]; uint32 datalen; unsigned long long bitlen; uint32 state[5]; uint32 k[4]; } CUDA_SHA1_CTX; #ifndef ROTLEFT #define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b)))) #endif /*********************** FUNCTION DEFINITIONS ***********************/ __device__ __host__ __forceinline__ void cuda_sha1_transform(CUDA_SHA1_CTX* ctx, const uint8 data[]) { uint32 a, b, c, d, e, i, j, t, m[80]; for (i = 0, j = 0; i < 16; ++i, j += 4) m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]); for (; i < 80; ++i) { m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]); m[i] = (m[i] << 1) | (m[i] >> 31); } a = ctx->state[0]; b = ctx->state[1]; c = ctx->state[2]; d = ctx->state[3]; e = ctx->state[4]; for (i = 0; i < 20; ++i) { t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } for (; i < 40; ++i) { t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } for (; i < 60; ++i) { t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } for (; i < 80; ++i) { t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } ctx->state[0] += a; ctx->state[1] += b; ctx->state[2] += c; ctx->state[3] += d; ctx->state[4] += e; } __device__ __host__ inline void cuda_sha1_init(CUDA_SHA1_CTX* ctx) { ctx->datalen = 0; ctx->bitlen = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xEFCDAB89; ctx->state[2] = 0x98BADCFE; ctx->state[3] = 0x10325476; ctx->state[4] = 0xc3d2e1f0; ctx->k[0] = 0x5a827999; ctx->k[1] = 0x6ed9eba1; ctx->k[2] = 0x8f1bbcdc; ctx->k[3] = 0xca62c1d6; } __device__ __host__ inline void cuda_sha1_update(CUDA_SHA1_CTX* ctx, const uint8 data[], size_t len) { size_t i; for (i = 0; i < len; ++i) { ctx->data[ctx->datalen] = data[i]; ctx->datalen++; if (ctx->datalen == 64) { cuda_sha1_transform(ctx, ctx->data); ctx->bitlen += 512; ctx->datalen = 0; } } } __device__ __host__ inline void cuda_sha1_final(CUDA_SHA1_CTX* ctx, uint8 hash[]) { uint32 i; i = ctx->datalen; // Pad whatever data is left in the buffer. if (ctx->datalen < 56) { ctx->data[i++] = 0x80; while (i < 56) ctx->data[i++] = 0x00; } else { ctx->data[i++] = 0x80; while (i < 64) ctx->data[i++] = 0x00; cuda_sha1_transform(ctx, ctx->data); memset(ctx->data, 0, 56); } // Append to the padding the total message's length in bits and transform. ctx->bitlen += ctx->datalen * 8; ctx->data[63] = ctx->bitlen; ctx->data[62] = ctx->bitlen >> 8; ctx->data[61] = ctx->bitlen >> 16; ctx->data[60] = ctx->bitlen >> 24; ctx->data[59] = ctx->bitlen >> 32; ctx->data[58] = ctx->bitlen >> 40; ctx->data[57] = ctx->bitlen >> 48; ctx->data[56] = ctx->bitlen >> 56; cuda_sha1_transform(ctx, ctx->data); // Since this implementation uses little endian byte ordering and MD uses big endian, // reverse all the bytes when copying the final state to the output hash. 
for (i = 0; i < 4; ++i) { hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff; hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff; hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff; hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff; hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff; } } __device__ __host__ inline void sha1new(uint8* msg, uint8 length, uint8 sha1[20]) { CUDA_SHA1_CTX ctx; cuda_sha1_init(&ctx); cuda_sha1_update(&ctx, msg, length); cuda_sha1_final(&ctx, sha1); } /*__global__ void kernel_sha1_hash(uint8* indata, uint32 inlen, uint8* outdata, uint32 n_batch) { uint32 thread = blockIdx.x * blockDim.x + threadIdx.x; if (thread >= n_batch) { return; } uint8* in = indata + thread * inlen; uint8* out = outdata + thread * SHA1_BLOCK_SIZE; CUDA_SHA1_CTX ctx; cuda_sha1_init(&ctx); cuda_sha1_update(&ctx, in, inlen); cuda_sha1_final(&ctx, out); } extern "C" { void mcm_cuda_sha1_hash_batch(uint8* in, uint32 inlen, uint8* out, uint32 n_batch) { uint8* cuda_indata; uint8* cuda_outdata; hipMalloc(&cuda_indata, inlen * n_batch); hipMalloc(&cuda_outdata, SHA1_BLOCK_SIZE * n_batch); hipMemcpy(cuda_indata, in, inlen * n_batch, hipMemcpyHostToDevice); uint8 thread = 256; uint8 block = (n_batch + thread - 1) / thread; kernel_sha1_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch); hipMemcpy(out, cuda_outdata, SHA1_BLOCK_SIZE * n_batch, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("Error cuda sha1 hash: %s \n", hipGetErrorString(error)); } hipFree(cuda_indata); hipFree(cuda_outdata); } } */
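One caveat if the commented-out mcm_cuda_sha1_hash_batch launcher above is ever re-enabled: thread and block are declared as uint8 (an unsigned char), so thread = 256 wraps to 0 and the block-count expression then divides by zero. A minimal corrected sketch of just the launch configuration, keeping the surrounding variable names from the commented code and only widening the types:

// Inside mcm_cuda_sha1_hash_batch, after copying the input batch to the device:
unsigned int threads_per_block = 256;  // 256 cannot be represented in uint8
unsigned int num_blocks = (n_batch + threads_per_block - 1) / threads_per_block;
kernel_sha1_hash<<<num_blocks, threads_per_block>>>(cuda_indata, inlen, cuda_outdata, n_batch);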
00cc35a00e5d016483886f778cb6750bd0dd86c8.cu
#include <stdlib.h> #include <memory.h> #include <stdio.h> #include <time.h> #include <stdint.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <curand_kernel.h> #include <device_functions.h> #define uint8 unsigned char #define uint32 unsigned long int #define SHA1_BLOCK_SIZE 20 typedef struct { uint8 data[64]; uint32 datalen; unsigned long long bitlen; uint32 state[5]; uint32 k[4]; } CUDA_SHA1_CTX; #ifndef ROTLEFT #define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b)))) #endif /*********************** FUNCTION DEFINITIONS ***********************/ __device__ __host__ __forceinline__ void cuda_sha1_transform(CUDA_SHA1_CTX* ctx, const uint8 data[]) { uint32 a, b, c, d, e, i, j, t, m[80]; for (i = 0, j = 0; i < 16; ++i, j += 4) m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]); for (; i < 80; ++i) { m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]); m[i] = (m[i] << 1) | (m[i] >> 31); } a = ctx->state[0]; b = ctx->state[1]; c = ctx->state[2]; d = ctx->state[3]; e = ctx->state[4]; for (i = 0; i < 20; ++i) { t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } for (; i < 40; ++i) { t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } for (; i < 60; ++i) { t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } for (; i < 80; ++i) { t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i]; e = d; d = c; c = ROTLEFT(b, 30); b = a; a = t; } ctx->state[0] += a; ctx->state[1] += b; ctx->state[2] += c; ctx->state[3] += d; ctx->state[4] += e; } __device__ __host__ inline void cuda_sha1_init(CUDA_SHA1_CTX* ctx) { ctx->datalen = 0; ctx->bitlen = 0; ctx->state[0] = 0x67452301; ctx->state[1] = 0xEFCDAB89; ctx->state[2] = 0x98BADCFE; ctx->state[3] = 0x10325476; ctx->state[4] = 0xc3d2e1f0; ctx->k[0] = 0x5a827999; ctx->k[1] = 0x6ed9eba1; ctx->k[2] = 0x8f1bbcdc; ctx->k[3] = 0xca62c1d6; } __device__ __host__ inline void cuda_sha1_update(CUDA_SHA1_CTX* ctx, const uint8 data[], size_t len) { size_t i; for (i = 0; i < len; ++i) { ctx->data[ctx->datalen] = data[i]; ctx->datalen++; if (ctx->datalen == 64) { cuda_sha1_transform(ctx, ctx->data); ctx->bitlen += 512; ctx->datalen = 0; } } } __device__ __host__ inline void cuda_sha1_final(CUDA_SHA1_CTX* ctx, uint8 hash[]) { uint32 i; i = ctx->datalen; // Pad whatever data is left in the buffer. if (ctx->datalen < 56) { ctx->data[i++] = 0x80; while (i < 56) ctx->data[i++] = 0x00; } else { ctx->data[i++] = 0x80; while (i < 64) ctx->data[i++] = 0x00; cuda_sha1_transform(ctx, ctx->data); memset(ctx->data, 0, 56); } // Append to the padding the total message's length in bits and transform. ctx->bitlen += ctx->datalen * 8; ctx->data[63] = ctx->bitlen; ctx->data[62] = ctx->bitlen >> 8; ctx->data[61] = ctx->bitlen >> 16; ctx->data[60] = ctx->bitlen >> 24; ctx->data[59] = ctx->bitlen >> 32; ctx->data[58] = ctx->bitlen >> 40; ctx->data[57] = ctx->bitlen >> 48; ctx->data[56] = ctx->bitlen >> 56; cuda_sha1_transform(ctx, ctx->data); // Since this implementation uses little endian byte ordering and MD uses big endian, // reverse all the bytes when copying the final state to the output hash. 
for (i = 0; i < 4; ++i) { hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff; hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff; hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff; hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff; hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff; } } __device__ __host__ inline void sha1new(uint8* msg, uint8 length, uint8 sha1[20]) { CUDA_SHA1_CTX ctx; cuda_sha1_init(&ctx); cuda_sha1_update(&ctx, msg, length); cuda_sha1_final(&ctx, sha1); } /*__global__ void kernel_sha1_hash(uint8* indata, uint32 inlen, uint8* outdata, uint32 n_batch) { uint32 thread = blockIdx.x * blockDim.x + threadIdx.x; if (thread >= n_batch) { return; } uint8* in = indata + thread * inlen; uint8* out = outdata + thread * SHA1_BLOCK_SIZE; CUDA_SHA1_CTX ctx; cuda_sha1_init(&ctx); cuda_sha1_update(&ctx, in, inlen); cuda_sha1_final(&ctx, out); } extern "C" { void mcm_cuda_sha1_hash_batch(uint8* in, uint32 inlen, uint8* out, uint32 n_batch) { uint8* cuda_indata; uint8* cuda_outdata; cudaMalloc(&cuda_indata, inlen * n_batch); cudaMalloc(&cuda_outdata, SHA1_BLOCK_SIZE * n_batch); cudaMemcpy(cuda_indata, in, inlen * n_batch, cudaMemcpyHostToDevice); uint8 thread = 256; uint8 block = (n_batch + thread - 1) / thread; kernel_sha1_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch); cudaMemcpy(out, cuda_outdata, SHA1_BLOCK_SIZE * n_batch, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("Error cuda sha1 hash: %s \n", cudaGetErrorString(error)); } cudaFree(cuda_indata); cudaFree(cuda_outdata); } } */
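Because cuda_sha1_init, cuda_sha1_update, cuda_sha1_final, and sha1new are all __device__ __host__, the implementation can be exercised from host code against the standard test vector SHA-1("abc") = a9993e364706816aba3e25717850c26c9cd0d89d. One thing such a check may surface: uint32 is defined here as unsigned long int, which is 64 bits on LP64 platforms, while SHA-1 relies on 32-bit modular addition and rotation, so the digest can come out wrong unless the macro is pointed at a true 32-bit type such as uint32_t (already available via the included <stdint.h>). A minimal host-side check, assuming this translation unit is compiled with nvcc or hipcc:

// Hedged sketch: host-side self-test for the SHA-1 helpers defined above.
#include <cstdio>
#include <cstring>

int sha1_self_test() {
  uint8 msg[] = "abc";                  // 3-byte FIPS 180-1 test message
  uint8 digest[SHA1_BLOCK_SIZE];
  sha1new(msg, 3, digest);
  const uint8 expected[SHA1_BLOCK_SIZE] = {
    0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
    0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d};
  int ok = (std::memcmp(digest, expected, SHA1_BLOCK_SIZE) == 0);
  std::printf("SHA-1(\"abc\") %s the reference digest\n", ok ? "matches" : "does NOT match");
  return ok;
}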
f02f137a17d233205ecc20be8f4e6cda56805c4a.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <math.h> #include </usr/local/cuda-9.0/targets/x86_64-linux/include/hiprand/hiprand_kernel.h> // Helper functions __device__ __forceinline__ size_t calcLinInd4(int idx_a, int idx_b, int idx_c, int idx_d, size_t AD, size_t BD,size_t CD, size_t DD ){ /* if (idx_d>= DD || idx_d < 0){ printf("fault1"); } if (idx_c>= CD || idx_c < 0){ printf("fault2"); } if (idx_c>= BD || idx_b < 0){ printf("fault3"); } if (idx_c>= AD || idx_a < 0){ printf("fault4"); }*/ return idx_d + DD*(idx_c + CD*(idx_b + BD*(idx_a))); } // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ************************************************ Kernels *********************************************************** #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } template <typename scalar_t> __global__ void mdconv_cuda_forward_kernel( const scalar_t* __restrict__ input, const float* __restrict__ l_filt, scalar_t* __restrict__ out, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_w, const size_t inp_h, const size_t inp_c, const size_t inp_b, const int totalOutPx, const int totalTreads, const float minusplus ) { const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalOutPx){ // Calculate Imout Indices int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)); size_t size_temp_reg; float this_px_out=0; float float_temp; float this_px_this_pxfilt_result; size_t input_idx=0; int dh; int dw; int chan; int cur_im_h =0; int cur_im_w =0; for ( dh= 0 ; dh < filt_h; dh++){ cur_im_h= dh + im_h_idx - ((filt_h)/2); if (cur_im_h< 0){ continue; } if (cur_im_h >= inp_h){ break; } for (dw = 0 ; dw < filt_w; dw++ ){ cur_im_w = dw + im_w_idx - ((filt_w)/2); if (cur_im_w<0){ continue; } if (cur_im_w >= inp_w){ break; } for ( chan = 0 ; chan < inp_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; //size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); //input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); size_temp_reg = calcLinInd4( im_c_idx, chan, dh,dw,filt_n, inp_c, filt_h, filt_w); input_idx = calcLinInd4( im_b_idx, chan,cur_im_h, cur_im_w,inp_b, inp_c , inp_h, inp_w); float_temp = minusplus*(l_filt[size_temp_reg] - ( input[input_idx])); if (chan==0){ this_px_this_pxfilt_result = float_temp; } else{ this_px_this_pxfilt_result = fminf(float_temp,this_px_this_pxfilt_result); } } this_px_out += this_px_this_pxfilt_result; } } out[threadlinidx] 
= this_px_out; //////////////////// GL MEM WRITE } } template <typename scalar_t> __global__ void mdconv_cuda_backward_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_w, const size_t inp_h, const size_t inp_c, const size_t inp_b, const int totalThreads, const float minusplus ) { const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalThreads){ // Calculate Imout Indices int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)); size_t size_temp_reg; float float_temp; float min_val_temp; int chan_temp; size_t input_idx=0; int dh; int dw; int chan; int cur_im_h =0; int cur_im_w =0; for ( dh= 0 ; dh < filt_h; dh++){ cur_im_h= dh + im_h_idx - ((filt_h)/2); if (cur_im_h< 0){ continue; } if (cur_im_h >= inp_h){ break; } for (dw = 0 ; dw < filt_w; dw++ ){ cur_im_w = dw + im_w_idx - ((filt_w)/2); if (cur_im_w<0){ continue; } if (cur_im_w >= inp_w){ break; } min_val_temp= 0; chan_temp = 0; for ( chan = 0 ; chan < inp_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; //size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); //input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); size_temp_reg = calcLinInd4( im_c_idx, chan, dh,dw,filt_n, inp_c, filt_h, filt_w); input_idx = calcLinInd4( im_b_idx, chan,cur_im_h, cur_im_w,inp_b, inp_c , inp_h, inp_w); float_temp = minusplus *(lfilt[size_temp_reg] - input[input_idx]); // Memory Access if (float_temp < min_val_temp){ min_val_temp = float_temp; chan_temp= chan;//inp_c ; } } // find the correct index of filt // get the index val from input // add to final answer; //size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); //input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); size_temp_reg = calcLinInd4( im_c_idx, chan_temp, dh,dw,filt_n, inp_c, filt_h, filt_w); input_idx = calcLinInd4( im_b_idx, chan_temp,cur_im_h, cur_im_w,inp_b, inp_c , inp_h, inp_w); float_temp = minusplus*dzdout[threadlinidx] ; // Memory Access atomicAdd(&(dzdin[input_idx]),-float_temp); //Memory Access atomicAdd(&(dzdl_filt[size_temp_reg]),float_temp); // Memory Access } } } } // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ------------------------------------Kernel Call Wrappers ----------------------------------------------------------- at::Tensor mdconv_cuda_forward( at::Tensor input, at::Tensor log_filt, const float minusplus ) { //at::Tensor p_filt = 
(at::exp(log_filt)); // p_filt = p_filt.cumsum(1); const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); //printf("(%d,%d,%d,%d)\n", p_filt.size(0),p_filt.size(1),p_filt.size(2),p_filt.size(3)); //printf("filt_num:%d ",filt_num); auto out = at::zeros(input.type(),{batch_sz,filt_num,im_height,im_width}); //TODO: Remove except zero loop auto random = at::rand(input.type(),{filt_height,filt_width,batch_sz,filt_num,im_height,im_width}); const int totalOutPx = im_height*im_width*batch_sz*filt_num; // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = totalOutPx; int j = 25; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_forward_cuda", ([&] { hipLaunchKernelGGL(( mdconv_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threadsperblock), 0, 0, input.data<scalar_t>(), log_filt.data<float>(), out.data<scalar_t>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalOutPx, totalThreads, minusplus ); })); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return out; } std::vector<at::Tensor> mdconv_cuda_backward(at::Tensor dzdout, at::Tensor input, at::Tensor log_filt, const float minusplus ) { const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); auto dzdinput = at::zeros_like(input); auto dzdlfilt = at::zeros_like(log_filt); const int totalThreads = im_height*im_width*batch_sz*filt_num; int j = 25; //TODO: Make J chosen automatically. the shared memory is the bottleneck. const int threadsperblock =j*32; //printf("shared mem bytes %d - KB: %d, j:%d ",shared_per_block, shared_per_block/1024,j); int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); AT_DISPATCH_FLOATING_TYPES(input.type(), "mdconv_backward_cuda", ([&] { hipLaunchKernelGGL(( mdconv_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threadsperblock), 0, 0, // TODO : CHANGE KLCONVS AND KLCONV BACK AND FORTH. NO FORGET.... NEVER FORGET, it is easy not to seee. input.data<scalar_t>(), log_filt.data<scalar_t>(), dzdout.data<scalar_t>(), dzdinput.data<float>(), dzdlfilt.data<float>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalThreads, minusplus ); })); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return {dzdinput, dzdlfilt}; }
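For reference while reading the kernels above: calcLinInd4 linearizes a 4-D index (a, b, c, d) over extents (AD, BD, CD, DD) in row-major order, and the AD argument is accepted but never used in the arithmetic. A host-side restatement of the same formula (the helper name is illustrative, not part of this extension):

#include <cstddef>

// idx = d + DD*(c + CD*(b + BD*a))  ==  ((a*BD + b)*CD + c)*DD + d
static std::size_t lin_index4(std::size_t a, std::size_t b, std::size_t c, std::size_t d,
                              std::size_t BD, std::size_t CD, std::size_t DD) {
  return d + DD * (c + CD * (b + BD * a));
}

// Worked example: lin_index4(1, 2, 3, 4, /*BD=*/16, /*CD=*/32, /*DD=*/64)
//   = 4 + 64*(3 + 32*(2 + 16*1)) = 4 + 64*579 = 37060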
f02f137a17d233205ecc20be8f4e6cda56805c4a.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <math.h> #include </usr/local/cuda-9.0/targets/x86_64-linux/include/curand_kernel.h> // Helper functions __device__ __forceinline__ size_t calcLinInd4(int idx_a, int idx_b, int idx_c, int idx_d, size_t AD, size_t BD,size_t CD, size_t DD ){ /* if (idx_d>= DD || idx_d < 0){ printf("fault1"); } if (idx_c>= CD || idx_c < 0){ printf("fault2"); } if (idx_c>= BD || idx_b < 0){ printf("fault3"); } if (idx_c>= AD || idx_a < 0){ printf("fault4"); }*/ return idx_d + DD*(idx_c + CD*(idx_b + BD*(idx_a))); } // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ************************************************ Kernels *********************************************************** #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } template <typename scalar_t> __global__ void mdconv_cuda_forward_kernel( const scalar_t* __restrict__ input, const float* __restrict__ l_filt, scalar_t* __restrict__ out, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_w, const size_t inp_h, const size_t inp_c, const size_t inp_b, const int totalOutPx, const int totalTreads, const float minusplus ) { const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalOutPx){ // Calculate Imout Indices int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)); size_t size_temp_reg; float this_px_out=0; float float_temp; float this_px_this_pxfilt_result; size_t input_idx=0; int dh; int dw; int chan; int cur_im_h =0; int cur_im_w =0; for ( dh= 0 ; dh < filt_h; dh++){ cur_im_h= dh + im_h_idx - ((filt_h)/2); if (cur_im_h< 0){ continue; } if (cur_im_h >= inp_h){ break; } for (dw = 0 ; dw < filt_w; dw++ ){ cur_im_w = dw + im_w_idx - ((filt_w)/2); if (cur_im_w<0){ continue; } if (cur_im_w >= inp_w){ break; } for ( chan = 0 ; chan < inp_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; //size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); //input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); size_temp_reg = calcLinInd4( im_c_idx, chan, dh,dw,filt_n, inp_c, filt_h, filt_w); input_idx = calcLinInd4( im_b_idx, chan,cur_im_h, cur_im_w,inp_b, inp_c , inp_h, inp_w); float_temp = minusplus*(l_filt[size_temp_reg] - ( input[input_idx])); if (chan==0){ this_px_this_pxfilt_result = float_temp; } else{ this_px_this_pxfilt_result = fminf(float_temp,this_px_this_pxfilt_result); } } this_px_out += this_px_this_pxfilt_result; } } out[threadlinidx] = this_px_out; //////////////////// GL MEM WRITE } } template <typename 
scalar_t> __global__ void mdconv_cuda_backward_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_w, const size_t inp_h, const size_t inp_c, const size_t inp_b, const int totalThreads, const float minusplus ) { const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalThreads){ // Calculate Imout Indices int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)); size_t size_temp_reg; float float_temp; float min_val_temp; int chan_temp; size_t input_idx=0; int dh; int dw; int chan; int cur_im_h =0; int cur_im_w =0; for ( dh= 0 ; dh < filt_h; dh++){ cur_im_h= dh + im_h_idx - ((filt_h)/2); if (cur_im_h< 0){ continue; } if (cur_im_h >= inp_h){ break; } for (dw = 0 ; dw < filt_w; dw++ ){ cur_im_w = dw + im_w_idx - ((filt_w)/2); if (cur_im_w<0){ continue; } if (cur_im_w >= inp_w){ break; } min_val_temp= 0; chan_temp = 0; for ( chan = 0 ; chan < inp_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; //size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); //input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); size_temp_reg = calcLinInd4( im_c_idx, chan, dh,dw,filt_n, inp_c, filt_h, filt_w); input_idx = calcLinInd4( im_b_idx, chan,cur_im_h, cur_im_w,inp_b, inp_c , inp_h, inp_w); float_temp = minusplus *(lfilt[size_temp_reg] - input[input_idx]); // Memory Access if (float_temp < min_val_temp){ min_val_temp = float_temp; chan_temp= chan;//inp_c ; } } // find the correct index of filt // get the index val from input // add to final answer; //size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); //input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); size_temp_reg = calcLinInd4( im_c_idx, chan_temp, dh,dw,filt_n, inp_c, filt_h, filt_w); input_idx = calcLinInd4( im_b_idx, chan_temp,cur_im_h, cur_im_w,inp_b, inp_c , inp_h, inp_w); float_temp = minusplus*dzdout[threadlinidx] ; // Memory Access atomicAdd(&(dzdin[input_idx]),-float_temp); //Memory Access atomicAdd(&(dzdl_filt[size_temp_reg]),float_temp); // Memory Access } } } } // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // ------------------------------------Kernel Call Wrappers ----------------------------------------------------------- at::Tensor mdconv_cuda_forward( at::Tensor input, at::Tensor log_filt, const float minusplus ) { //at::Tensor p_filt = (at::exp(log_filt)); // p_filt = p_filt.cumsum(1); const auto batch_sz = 
input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); //printf("(%d,%d,%d,%d)\n", p_filt.size(0),p_filt.size(1),p_filt.size(2),p_filt.size(3)); //printf("filt_num:%d ",filt_num); auto out = at::zeros(input.type(),{batch_sz,filt_num,im_height,im_width}); //TODO: Remove except zero loop auto random = at::rand(input.type(),{filt_height,filt_width,batch_sz,filt_num,im_height,im_width}); const int totalOutPx = im_height*im_width*batch_sz*filt_num; // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = totalOutPx; int j = 25; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_forward_cuda", ([&] { mdconv_cuda_forward_kernel<scalar_t><<<blocks, threadsperblock>>>( input.data<scalar_t>(), log_filt.data<float>(), out.data<scalar_t>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalOutPx, totalThreads, minusplus ); })); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return out; } std::vector<at::Tensor> mdconv_cuda_backward(at::Tensor dzdout, at::Tensor input, at::Tensor log_filt, const float minusplus ) { const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); auto dzdinput = at::zeros_like(input); auto dzdlfilt = at::zeros_like(log_filt); const int totalThreads = im_height*im_width*batch_sz*filt_num; int j = 25; //TODO: Make J chosen automatically. the shared memory is the bottleneck. const int threadsperblock =j*32; //printf("shared mem bytes %d - KB: %d, j:%d ",shared_per_block, shared_per_block/1024,j); int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); AT_DISPATCH_FLOATING_TYPES(input.type(), "mdconv_backward_cuda", ([&] { mdconv_cuda_backward_kernel<scalar_t><<<blocks, threadsperblock>>>( // TODO : CHANGE KLCONVS AND KLCONV BACK AND FORTH. NO FORGET.... NEVER FORGET, it is easy not to seee. input.data<scalar_t>(), log_filt.data<scalar_t>(), dzdout.data<scalar_t>(), dzdinput.data<float>(), dzdlfilt.data<float>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalThreads, minusplus ); })); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return {dzdinput, dzdlfilt}; }
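Read side by side, the mdconv pair above shows the launch-syntax rewrite hipify applies throughout this corpus: the CUDA triple-chevron launch becomes a hipLaunchKernelGGL call whose third and fourth arguments are the dynamic shared-memory size and the stream (both 0 here), while runtime calls map one-to-one (cudaPeekAtLastError to hipPeekAtLastError, cudaDeviceSynchronize to hipDeviceSynchronize, and so on). Schematically, for the forward kernel, with the argument list elided since it is identical in both forms:

// CUDA form (from the .cu file):
mdconv_cuda_forward_kernel<scalar_t><<<blocks, threadsperblock>>>(/* arguments as above */);

// HIP form emitted by hipify (from the .hip file):
hipLaunchKernelGGL((mdconv_cuda_forward_kernel<scalar_t>),
                   dim3(blocks), dim3(threadsperblock),
                   0 /* dynamic shared memory */, 0 /* stream */,
                   /* arguments as above */);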
a7be3cf450475124423b693560b20ba11ef76658.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include "thrust/functional.h" #include "thrust/sort.h" namespace caffe { template <typename Dtype> __global__ void ScaleGPU(const int nthreads, const Dtype alpha, Dtype* X){ CUDA_KERNEL_LOOP(index, nthreads) { X[index] = alpha * X[index]; } } template <typename Dtype> __global__ void SoftmaxLossProbComputeGPU(const int nthreads, Dtype* prob_gt_data, const int n_step, const int c_step, const Dtype* label, const int dim1, const int channels, const int w, const int h, const int roi_, SoftmaxWithLossParameter_KernelType type_, Dtype gaussion_sigma_){ CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / channels; const int c = index % channels; int roi_w = roi_ * 2 + 1; int roi_h = roi_ * 2 + 1; Dtype *dst = prob_gt_data + n * n_step + c * c_step; // For each landmar Dtype pts_x = label[n * dim1 + c] * w; Dtype pts_y = label[n * dim1 + c + channels] * h; pts_x = pts_x - floorf(pts_x) + roi_; pts_y = pts_y - floorf(pts_y) + roi_; Dtype prob_normalizer = 0; for (int y = 0; y < roi_h; ++y) { for (int x = 0; x < roi_w; ++x) { switch (type_) { case SoftmaxWithLossParameter_KernelType_EXP: *dst = pow(0.5, MAX(fabs(x - pts_x), fabs(y - pts_y))); break; case SoftmaxWithLossParameter_KernelType_GAUSION: // -1/(2*PI*sigma^2)*exp(-0.5*d^2/sigma^2) *dst = 1 / (sqrt(2 * M_PI) * gaussion_sigma_) * exp(-0.5 * ((x - pts_x) * (x - pts_x) + (y - pts_y) * (y - pts_y)) / (gaussion_sigma_ * gaussion_sigma_)); break; default: *dst = pow(0.5, MAX(fabs(x - pts_x), fabs(y - pts_y))); } prob_normalizer += *dst; dst++; } } // Normalize dst = prob_gt_data + n * n_step + c * c_step; for (int i = 0; i < c_step; ++i) { dst[i] = dst[i] / prob_normalizer; } } } template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const bool has_hard_mining_label_, const int hard_mining_label_, const bool has_cutting_point_, Dtype cutting_point_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const int channels = dim / spatial_dim; if (has_cutting_point_ && prob_data[n * dim + label_value * spatial_dim + s] > cutting_point_ && (!has_hard_mining_label_ || hard_mining_label_ == label_value)) { for (int c = 0; c < channels; ++c) { prob_data[n * dim + c * spatial_dim + s] = 0; } prob_data[n * dim + label_value * spatial_dim + s] = 1; } if ((has_ignore_label_ && label_value == ignore_label_)) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossCrossEntropyForwardGPU(const int nthreads, Dtype* prob_gt_data, const int n_step, const int c_step, const Dtype* prob_data, const int dim0, const int dim, const Dtype* label, const int dim1, const int channels, Dtype* loss_data, const int w, const int h, const int roi_, Dtype* counts, const Dtype* weights, bool input_weights){ CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / channels; const int c = index % channels; // Center of the j-th Probability map const Dtype cx = 
label[n * dim1 + c] * w; const Dtype cy = label[n * dim1 + c + channels] * h; // Region for computing cross entropy loss // This is important. Must keep consistent with the groundtruth map. int x1 = roundf(cx) - roi_ > 0 ? roundf(cx) - roi_ : 0; int y1 = roundf(cy) - roi_ > 0 ? roundf(cy) - roi_ : 0; int x2 = roundf(cx) + roi_ < w ? roundf(cx) + roi_ : w - 1; int y2 = roundf(cy) + roi_ < h ? roundf(cy) + roi_ : h - 1; int roi_w = roi_ * 2 + 1; int xb = roundf(cx) - roi_; int yb = roundf(cy) - roi_; // Weights for each image. Dtype weight_value; if(input_weights){ weight_value = weights[n]; } loss_data[index] = 0; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { loss_data[index] -= prob_gt_data[n * n_step + c * c_step + (y - yb) * roi_w + x - xb] * log(MAX(prob_data[n * dim0 + c * dim + y * w + x], Dtype(FLT_MIN))); } } // Weight if(input_weights){ loss_data[index] *= weight_value; counts[index] = weight_value; }else{ counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossForwardWithWeightsGPU(const int nthreads, Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype* weights, const Dtype* class_weights, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const bool has_hard_mining_label_, const int hard_mining_label_, const bool has_cutting_point_, Dtype cutting_point_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype weight_value = weights[n * spatial_dim + s] * class_weights[label_value]; const int channels = dim / spatial_dim; if (has_cutting_point_ && prob_data[n * dim + label_value * spatial_dim + s] > cutting_point_ && (!has_hard_mining_label_ || hard_mining_label_ == label_value)) { for (int c = 0; c < channels; ++c) { prob_data[n * dim + c * spatial_dim + s] = 0; } prob_data[n * dim + label_value * spatial_dim + s] = 1; } if ((weight_value == 0) || (has_ignore_label_ && label_value == ignore_label_)) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -weight_value * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = weight_value; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if(is_soft_classify_){ Forward_cpu(bottom, top); return ; } softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); Dtype* prob_data = prob_.mutable_gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data_test = loss_.mutable_cpu_data(); Dtype* loss_data = loss_.mutable_gpu_data(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = counts_.mutable_gpu_data(); if (bottom.size() == 2 && !is_soft_classify_) { // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, has_hard_mining_label_, hard_mining_label_, has_cutting_point_, cutting_point_, counts); } else if( (bottom.size() == 2 || bottom.size() == 3) && is_soft_classify_) { int num_images = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int dim0 = bottom[0]->count() / num_images; int dim1 = bottom[1]->count() / num_images; int h = sqrtf(bottom[0]->shape(2)); int w = h; // for (int n = 0; n < num_images; ++n) { if(is_profile_){ // Determine left or right profile Dtype nose_x = label[n * dim1 + 19] * w; Dtype contour_x = label[n * dim1 + 3] * w; if(nose_x>contour_x){ cur_profile_type_.push_back(2); // Right profile }else{ cur_profile_type_.push_back(1); // Left profile } } } // Weights const Dtype* weights; bool input_weights = false; if(bottom.size() == 3){ weights = bottom[2]->gpu_data(); input_weights = true; } // Generate groundtruth probability map Dtype *prob_gt_data = prob_groundtruth_.mutable_gpu_data(); int c_step = prob_groundtruth_.count(2); int n_step = prob_groundtruth_.count(1); // For Each channels const int nthreads1 = num_images * channels; // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossProbComputeGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads1), CAFFE_CUDA_NUM_THREADS >> >(nthreads1, prob_gt_data, n_step, c_step, label, dim1, channels, w, h, roi_, type_, gaussion_sigma_); // Visual //prob_gt_data = prob_groundtruth_.mutable_cpu_data(); //for (int y = 0; y < roi_h; ++y) { // for (int x = 0; x < roi_h; ++x) { // std::cout << prob_gt_data[y * roi_w + x] << " "; // } // std::cout << "" << std::endl; //} // Compute loss SoftmaxLossCrossEntropyForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads1), CAFFE_CUDA_NUM_THREADS >> >(nthreads1, prob_gt_data, n_step, c_step, prob_data,dim0,dim, label, dim1, channels, loss_data, w, h, roi_, counts, weights, input_weights); } else if (bottom.size() == 3 && !is_soft_classify_) { const Dtype* weights = bottom[2]->gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardWithWeightsGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, prob_data, label, loss_data, weights, class_weight_.gpu_data(), outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, has_hard_mining_label_, hard_mining_label_, has_cutting_point_, cutting_point_, counts); } Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if ((normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) || (bottom.size() == 3)) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { if(!is_output_landmark_loss_){ top[1]->ShareData(prob_); } } } template <typename Dtype> __global__ void SoftmaxLossCrossEntropyBackwardGPU(const int nthreads, const Dtype* prob_gt_data, const int n_step, const int c_step, Dtype* bottom_diff, const int dim0, const int dim, const Dtype*label, const int dim1, const int channels, const int w, const int h, const int roi_, Dtype* counts, const Dtype* weights, bool input_weights){ CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / channels; const int c = index % channels; // Weights Dtype weight_value; if(input_weights){ weight_value = weights[n]; } // Center of the j-th Probability map. const Dtype cx = label[n * dim1 + c ] * w; const Dtype cy = label[n * dim1 + c + channels] * h; // Region for computing cross entropy loss // This is important. Must keep consistent with the groundtruth map. int x1 = roundf(cx - roi_) > 0 ? roundf(cx) - roi_ : 0; int y1 = roundf(cy - roi_) > 0 ? roundf(cy) - roi_ : 0; int x2 = roundf(cx + roi_) < w ? roundf(cx) + roi_ : w - 1; int y2 = roundf(cy + roi_) < h ? roundf(cy) + roi_ : h - 1; int roi_w = roi_ * 2 + 1; int xb = roundf(cx) - roi_; int yb = roundf(cy) - roi_; const Dtype* prob_gt_channel_data = prob_gt_data + n * n_step + c * c_step; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x){ bottom_diff[n * dim0 + c * dim + y * w + x] -= prob_gt_channel_data[ (y - yb) * roi_w + x - xb]; } } // Weights if(input_weights){ for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { bottom_diff[n * dim0 + c * dim + y * w + x] *= weight_value; } } counts[index] = weight_value; }else{ counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossBackwardWithWeightsGPU(const int nthreads, const Dtype* top, const Dtype* weights, const Dtype* class_weight, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype weight_value = weights[n * spatial_dim + s]; if ((has_ignore_label_ && label_value == ignore_label_) || (weight_value == 0)) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * 
spatial_dim + s] *= weight_value * class_weight[c]; } counts[index] = weight_value; } } } template <typename Dtype> __global__ void Threshold(const int n, const Dtype* loss, Dtype threshold, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = loss[index] < threshold ? 0 : out[index]; } } template <typename Dtype> __global__ void ThresholdWithLabel(const int n, const Dtype* loss, Dtype threshold, const Dtype* label, Dtype hard_mining_label, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = (label[index] == hard_mining_label &&loss[index] < threshold) ? 0 : out[index]; } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { /*if(is_soft_classify_){ Backward_cpu(top, propagate_down, bottom); return ; }*/ if (has_hard_ratio_ && bottom.size() == 3) { caffe_copy(outer_num_ * inner_num_, loss_.cpu_data(), loss_.mutable_cpu_diff()); std::sort(loss_.mutable_cpu_diff(), loss_.mutable_cpu_diff() + outer_num_ * inner_num_);//thrust::sort Dtype loss_threshold = loss_.cpu_diff()[(int)(outer_num_ * inner_num_ * (1 - hard_ratio_))]; if (has_hard_mining_label_) { // NOLINT_NEXT_LINE(whitespace/operators) ThresholdWithLabel<Dtype> << <CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS >> >( outer_num_ * inner_num_, loss_.gpu_data(), loss_threshold, bottom[1]->gpu_data(), hard_mining_label_, bottom[2]->mutable_gpu_data()); } else { // NOLINT_NEXT_LINE(whitespace/operators) Threshold<Dtype> << <CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS >> >( outer_num_ * inner_num_, loss_.gpu_data(), loss_threshold, bottom[2]->mutable_gpu_data()); } } Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = counts_.mutable_gpu_data(); if (bottom.size() == 2 && !is_soft_classify_) { // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); } else if( (bottom.size() == 2 || bottom.size() == 3) && is_soft_classify_){ int num_images = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int dim0 = bottom[0]->count()/num_images; int dim1 = bottom[1]->count()/num_images; int w = sqrtf(bottom[0]->shape(2)); int h = w; const Dtype* prob_gt_data = prob_groundtruth_.gpu_data(); int n_step = prob_groundtruth_.count(1); int c_step = prob_groundtruth_.count(2); // Weights of each image const Dtype* weights; bool input_weights = false; if(bottom.size() == 3){ weights = bottom[2]->gpu_data(); input_weights = true; } // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossCrossEntropyBackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, prob_gt_data, n_step, c_step, bottom_diff, dim0, dim, label, dim1, channels, w, h, roi_, counts, weights, input_weights); // For profile landmark detection. 
for (int n = 0; n < num_images; ++n) { // Set diff to zero if the profile type is different. if(is_profile_ && profile_type_ != cur_profile_type_[n]) { caffe_gpu_set(dim0, Dtype(0), bottom_diff + n * dim0); } } } else if (bottom.size() == 3 && !is_soft_classify_) { const Dtype* weights = bottom[2]->gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardWithWeightsGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, top_data, weights, class_weight_.gpu_data(), label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); } Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if ((normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) || (bottom.size() == 3)) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
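// Sketch for illustration (not from the Caffe source above): each backward kernel
// writes a per-element weight into `counts` (0 for ignored samples, 1 or a sample
// weight otherwise), and the host reduces that buffer with caffe_gpu_asum, which is
// essentially a BLAS asum wrapper, to obtain `valid_count` for loss normalization.
// A minimal sketch of the same reduction step using Thrust purely for illustration;
// `counts_dev` and `nthreads` are assumed names, not Caffe's.
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

double count_valid(const double* counts_dev, int nthreads) {
  // Entries are non-negative: all 0/1 entries give the number of valid outputs,
  // per-sample weights give the total weight used as the normalizer.
  return thrust::reduce(thrust::device, counts_dev, counts_dev + nthreads, 0.0);
}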
a7be3cf450475124423b693560b20ba11ef76658.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include "thrust/functional.h" #include "thrust/sort.h" namespace caffe { template <typename Dtype> __global__ void ScaleGPU(const int nthreads, const Dtype alpha, Dtype* X){ CUDA_KERNEL_LOOP(index, nthreads) { X[index] = alpha * X[index]; } } template <typename Dtype> __global__ void SoftmaxLossProbComputeGPU(const int nthreads, Dtype* prob_gt_data, const int n_step, const int c_step, const Dtype* label, const int dim1, const int channels, const int w, const int h, const int roi_, SoftmaxWithLossParameter_KernelType type_, Dtype gaussion_sigma_){ CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / channels; const int c = index % channels; int roi_w = roi_ * 2 + 1; int roi_h = roi_ * 2 + 1; Dtype *dst = prob_gt_data + n * n_step + c * c_step; // For each landmar Dtype pts_x = label[n * dim1 + c] * w; Dtype pts_y = label[n * dim1 + c + channels] * h; pts_x = pts_x - floorf(pts_x) + roi_; pts_y = pts_y - floorf(pts_y) + roi_; Dtype prob_normalizer = 0; for (int y = 0; y < roi_h; ++y) { for (int x = 0; x < roi_w; ++x) { switch (type_) { case SoftmaxWithLossParameter_KernelType_EXP: *dst = pow(0.5, MAX(fabs(x - pts_x), fabs(y - pts_y))); break; case SoftmaxWithLossParameter_KernelType_GAUSION: // -1/(2*PI*sigma^2)*exp(-0.5*d^2/sigma^2) *dst = 1 / (sqrt(2 * M_PI) * gaussion_sigma_) * exp(-0.5 * ((x - pts_x) * (x - pts_x) + (y - pts_y) * (y - pts_y)) / (gaussion_sigma_ * gaussion_sigma_)); break; default: *dst = pow(0.5, MAX(fabs(x - pts_x), fabs(y - pts_y))); } prob_normalizer += *dst; dst++; } } // Normalize dst = prob_gt_data + n * n_step + c * c_step; for (int i = 0; i < c_step; ++i) { dst[i] = dst[i] / prob_normalizer; } } } template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const bool has_hard_mining_label_, const int hard_mining_label_, const bool has_cutting_point_, Dtype cutting_point_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const int channels = dim / spatial_dim; if (has_cutting_point_ && prob_data[n * dim + label_value * spatial_dim + s] > cutting_point_ && (!has_hard_mining_label_ || hard_mining_label_ == label_value)) { for (int c = 0; c < channels; ++c) { prob_data[n * dim + c * spatial_dim + s] = 0; } prob_data[n * dim + label_value * spatial_dim + s] = 1; } if ((has_ignore_label_ && label_value == ignore_label_)) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossCrossEntropyForwardGPU(const int nthreads, Dtype* prob_gt_data, const int n_step, const int c_step, const Dtype* prob_data, const int dim0, const int dim, const Dtype* label, const int dim1, const int channels, Dtype* loss_data, const int w, const int h, const int roi_, Dtype* counts, const Dtype* weights, bool input_weights){ CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / channels; const int c = index % channels; // Center of the j-th Probability map const Dtype cx = label[n * dim1 + c] * w; const Dtype cy = label[n * dim1 + c + channels] * h; // Region for 
computing cross entropy loss // This is important. Must keep consistent with the groundtruth map. int x1 = roundf(cx) - roi_ > 0 ? roundf(cx) - roi_ : 0; int y1 = roundf(cy) - roi_ > 0 ? roundf(cy) - roi_ : 0; int x2 = roundf(cx) + roi_ < w ? roundf(cx) + roi_ : w - 1; int y2 = roundf(cy) + roi_ < h ? roundf(cy) + roi_ : h - 1; int roi_w = roi_ * 2 + 1; int xb = roundf(cx) - roi_; int yb = roundf(cy) - roi_; // Weights for each image. Dtype weight_value; if(input_weights){ weight_value = weights[n]; } loss_data[index] = 0; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { loss_data[index] -= prob_gt_data[n * n_step + c * c_step + (y - yb) * roi_w + x - xb] * log(MAX(prob_data[n * dim0 + c * dim + y * w + x], Dtype(FLT_MIN))); } } // Weight if(input_weights){ loss_data[index] *= weight_value; counts[index] = weight_value; }else{ counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossForwardWithWeightsGPU(const int nthreads, Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype* weights, const Dtype* class_weights, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const bool has_hard_mining_label_, const int hard_mining_label_, const bool has_cutting_point_, Dtype cutting_point_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype weight_value = weights[n * spatial_dim + s] * class_weights[label_value]; const int channels = dim / spatial_dim; if (has_cutting_point_ && prob_data[n * dim + label_value * spatial_dim + s] > cutting_point_ && (!has_hard_mining_label_ || hard_mining_label_ == label_value)) { for (int c = 0; c < channels; ++c) { prob_data[n * dim + c * spatial_dim + s] = 0; } prob_data[n * dim + label_value * spatial_dim + s] = 1; } if ((weight_value == 0) || (has_ignore_label_ && label_value == ignore_label_)) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -weight_value * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = weight_value; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if(is_soft_classify_){ Forward_cpu(bottom, top); return ; } softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); Dtype* prob_data = prob_.mutable_gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data_test = loss_.mutable_cpu_data(); Dtype* loss_data = loss_.mutable_gpu_data(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = counts_.mutable_gpu_data(); if (bottom.size() == 2 && !is_soft_classify_) { // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, has_hard_mining_label_, hard_mining_label_, has_cutting_point_, cutting_point_, counts); } else if( (bottom.size() == 2 || bottom.size() == 3) && is_soft_classify_) { int num_images = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int dim0 = bottom[0]->count() / num_images; int dim1 = bottom[1]->count() / num_images; int h = sqrtf(bottom[0]->shape(2)); int w = h; // for (int n = 0; n < num_images; ++n) { if(is_profile_){ // Determine left or right profile Dtype nose_x = label[n * dim1 + 19] * w; Dtype contour_x = label[n * dim1 + 3] * w; if(nose_x>contour_x){ cur_profile_type_.push_back(2); // Right profile }else{ cur_profile_type_.push_back(1); // Left profile } } } // Weights const Dtype* weights; bool input_weights = false; if(bottom.size() == 3){ weights = bottom[2]->gpu_data(); input_weights = true; } // Generate groundtruth probability map Dtype *prob_gt_data = prob_groundtruth_.mutable_gpu_data(); int c_step = prob_groundtruth_.count(2); int n_step = prob_groundtruth_.count(1); // For Each channels const int nthreads1 = num_images * channels; // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossProbComputeGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads1), CAFFE_CUDA_NUM_THREADS >> >(nthreads1, prob_gt_data, n_step, c_step, label, dim1, channels, w, h, roi_, type_, gaussion_sigma_); // Visual //prob_gt_data = prob_groundtruth_.mutable_cpu_data(); //for (int y = 0; y < roi_h; ++y) { // for (int x = 0; x < roi_h; ++x) { // std::cout << prob_gt_data[y * roi_w + x] << " "; // } // std::cout << "" << std::endl; //} // Compute loss SoftmaxLossCrossEntropyForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads1), CAFFE_CUDA_NUM_THREADS >> >(nthreads1, prob_gt_data, n_step, c_step, prob_data,dim0,dim, label, dim1, channels, loss_data, w, h, roi_, counts, weights, input_weights); } else if (bottom.size() == 3 && !is_soft_classify_) { const Dtype* weights = bottom[2]->gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardWithWeightsGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, prob_data, label, loss_data, weights, class_weight_.gpu_data(), outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, has_hard_mining_label_, hard_mining_label_, has_cutting_point_, cutting_point_, counts); } Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if ((normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) || (bottom.size() == 3)) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { if(!is_output_landmark_loss_){ top[1]->ShareData(prob_); } } } template <typename Dtype> __global__ void SoftmaxLossCrossEntropyBackwardGPU(const int nthreads, const Dtype* prob_gt_data, const int n_step, const int c_step, Dtype* bottom_diff, const int dim0, const int dim, const Dtype*label, const int dim1, const int channels, const int w, const int h, const int roi_, Dtype* counts, const Dtype* weights, bool input_weights){ CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / channels; const int c = index % channels; // Weights Dtype weight_value; if(input_weights){ weight_value = weights[n]; } // Center of the j-th Probability map. const Dtype cx = label[n * dim1 + c ] * w; const Dtype cy = label[n * dim1 + c + channels] * h; // Region for computing cross entropy loss // This is important. Must keep consistent with the groundtruth map. int x1 = roundf(cx - roi_) > 0 ? roundf(cx) - roi_ : 0; int y1 = roundf(cy - roi_) > 0 ? roundf(cy) - roi_ : 0; int x2 = roundf(cx + roi_) < w ? roundf(cx) + roi_ : w - 1; int y2 = roundf(cy + roi_) < h ? roundf(cy) + roi_ : h - 1; int roi_w = roi_ * 2 + 1; int xb = roundf(cx) - roi_; int yb = roundf(cy) - roi_; const Dtype* prob_gt_channel_data = prob_gt_data + n * n_step + c * c_step; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x){ bottom_diff[n * dim0 + c * dim + y * w + x] -= prob_gt_channel_data[ (y - yb) * roi_w + x - xb]; } } // Weights if(input_weights){ for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { bottom_diff[n * dim0 + c * dim + y * w + x] *= weight_value; } } counts[index] = weight_value; }else{ counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> __global__ void SoftmaxLossBackwardWithWeightsGPU(const int nthreads, const Dtype* top, const Dtype* weights, const Dtype* class_weight, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype weight_value = weights[n * spatial_dim + s]; if ((has_ignore_label_ && label_value == ignore_label_) || (weight_value == 0)) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * 
spatial_dim + s] *= weight_value * class_weight[c]; } counts[index] = weight_value; } } } template <typename Dtype> __global__ void Threshold(const int n, const Dtype* loss, Dtype threshold, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = loss[index] < threshold ? 0 : out[index]; } } template <typename Dtype> __global__ void ThresholdWithLabel(const int n, const Dtype* loss, Dtype threshold, const Dtype* label, Dtype hard_mining_label, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = (label[index] == hard_mining_label &&loss[index] < threshold) ? 0 : out[index]; } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { /*if(is_soft_classify_){ Backward_cpu(top, propagate_down, bottom); return ; }*/ if (has_hard_ratio_ && bottom.size() == 3) { caffe_copy(outer_num_ * inner_num_, loss_.cpu_data(), loss_.mutable_cpu_diff()); std::sort(loss_.mutable_cpu_diff(), loss_.mutable_cpu_diff() + outer_num_ * inner_num_);//thrust::sort Dtype loss_threshold = loss_.cpu_diff()[(int)(outer_num_ * inner_num_ * (1 - hard_ratio_))]; if (has_hard_mining_label_) { // NOLINT_NEXT_LINE(whitespace/operators) ThresholdWithLabel<Dtype> << <CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS >> >( outer_num_ * inner_num_, loss_.gpu_data(), loss_threshold, bottom[1]->gpu_data(), hard_mining_label_, bottom[2]->mutable_gpu_data()); } else { // NOLINT_NEXT_LINE(whitespace/operators) Threshold<Dtype> << <CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS >> >( outer_num_ * inner_num_, loss_.gpu_data(), loss_threshold, bottom[2]->mutable_gpu_data()); } } Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = counts_.mutable_gpu_data(); if (bottom.size() == 2 && !is_soft_classify_) { // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); } else if( (bottom.size() == 2 || bottom.size() == 3) && is_soft_classify_){ int num_images = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int dim0 = bottom[0]->count()/num_images; int dim1 = bottom[1]->count()/num_images; int w = sqrtf(bottom[0]->shape(2)); int h = w; const Dtype* prob_gt_data = prob_groundtruth_.gpu_data(); int n_step = prob_groundtruth_.count(1); int c_step = prob_groundtruth_.count(2); // Weights of each image const Dtype* weights; bool input_weights = false; if(bottom.size() == 3){ weights = bottom[2]->gpu_data(); input_weights = true; } // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossCrossEntropyBackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, prob_gt_data, n_step, c_step, bottom_diff, dim0, dim, label, dim1, channels, w, h, roi_, counts, weights, input_weights); // For profile landmark detection. 
for (int n = 0; n < num_images; ++n) { // Set diff to zero if the profile type is different. if(is_profile_ && profile_type_ != cur_profile_type_[n]) { caffe_gpu_set(dim0, Dtype(0), bottom_diff + n * dim0); } } } else if (bottom.size() == 3 && !is_soft_classify_) { const Dtype* weights = bottom[2]->gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardWithWeightsGPU<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(nthreads, top_data, weights, class_weight_.gpu_data(), label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); } Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if ((normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) || (bottom.size() == 3)) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
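// Sketch for illustration (not from the Caffe source above): every kernel in this
// layer iterates via Caffe's CUDA_KERNEL_LOOP macro and is launched with
// CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS. The macro is the usual grid-stride
// loop, roughly as below; the macro, kernel and variable names here are
// illustrative stand-ins, not Caffe's definitions.
#define GRID_STRIDE_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

__global__ void scale_inplace(const int n, const float alpha, float* x) {
  GRID_STRIDE_LOOP(index, n) {
    x[index] *= alpha;  // each thread handles index, index + stride, ...
  }
}

// Launch shape mirroring CAFFE_GET_BLOCKS(n) blocks of CAFFE_CUDA_NUM_THREADS
// (typically 512) threads:
//   scale_inplace<<<(n + 511) / 512, 512>>>(n, 2.0f, x_dev);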
020d7d955bfc8d7247bf07d87976bcb188875db4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hipfft.h> #include <hip/hip_complex.h> #include <helper_cuda.h> #include "dnsparams.h" #include "cudafuncs.h" #include "fftfuncs.h" //============================================================================== // Transpose algorithm //============================================================================== __global__ void organizeData(hipfftDoubleComplex *in, hipfftDoubleComplex *out, int N, int j) {// Function to grab non-contiguous chunks of data and make them contiguous const int k = blockIdx.x * blockDim.x + threadIdx.x; if(k >= NZ2) return; for(int i=0; i<N; ++i){ // printf("For thread %d, indexing begins at local index of %d, which maps to temp at location %d\n", k, (k+ NZ*j), k); out[k + i*NZ2] = in[k + NZ2*j + i*NY*NZ2]; } return; } __global__ void organizeData_coalesced(hipfftDoubleComplex *in, hipfftDoubleComplex *out, int N, int j) {// Function to grab non-contiguous chunks of data and make them contiguous const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= N) return; for(int k=0; k<NZ2; ++k){ // printf("For thread %d, indexing begins at local index of %d, which maps to temp at location %d\n", k, (k+ NZ*j), k); out[k + i*NZ2] = in[k + NZ2*j + i*NY*NZ2]; } return; } __global__ void organizeData_2d(hipfftDoubleComplex *in, hipfftDoubleComplex *out, int N, int j) {// Function to grab non-contiguous chunks of data and make them contiguous const int i = blockIdx.x * blockDim.x + threadIdx.x; const int k = blockIdx.y * blockDim.y + threadIdx.y; if(i >= N || k >= NZ2) return; out[k + i*NZ2] = in[k + NZ2*j + i*NY*NZ2]; return; } void transpose_xy_mgpu(gpuinfo gpu, hipfftDoubleComplex **src, hipfftDoubleComplex **dst, hipfftDoubleComplex **temp) { // Transpose x and y directions (for a z-contiguous 1d array distributed across multiple GPUs) // This function loops through GPUs to do the transpose. Requires extra conversion to calculate the local index at the source location. 
// printf("Taking Transpose...\n"); int n, j, local_idx_dst, dstNum; for(j=0; j<NY; ++j){ for(n=0; n<gpu.nGPUs; ++n){ hipSetDevice(n); // Determine which GPU to send data to based on y-index, j dstNum = (j*gpu.nGPUs)/NY; const dim3 blockSize(TX, TZ, 1); const dim3 gridSize(divUp(NX, TX), divUp(NZ2, TZ), 1); // Open kernel that grabs all data hipLaunchKernelGGL(( organizeData_2d), dim3(gridSize),dim3(blockSize), 0, 0, src[n], temp[n], gpu.nx[n], j); local_idx_dst = gpu.start_x[n]*NZ2 + (j - gpu.start_y[dstNum])*NZ2*NX; checkCudaErrors( hipMemcpyAsync(&dst[dstNum][local_idx_dst], temp[n], sizeof( hipfftDoubleComplex )*NZ2*gpu.nx[n], hipMemcpyDefault) ); } } return; } //============================================================================== // FFT functions //============================================================================== void plan2dFFT(gpuinfo gpu, fftinfo fft){ // This function plans a 2-dimensional FFT to operate on the Z and Y directions (assumes Z-direction is contiguous in memory) int result; int n; for(n = 0; n<gpu.nGPUs; ++n){ hipSetDevice(n); //Create plan for 2-D cuFFT, set cuFFT parameters int rank = 2; int size[] = {NY,NZ}; int inembed[] = {NY,2*NZ2}; // inembed measures distance between dimensions of data int onembed[] = {NY,NZ2}; // Uses half the domain for a R2C transform int istride = 1; // istride is distance between consecutive elements int ostride = 1; int idist = NY*2*NZ2; // idist is the total length of one signal int odist = NY*NZ2; int batch = gpu.nx[n]; // # of 2D FFTs to perform // Create empty plan handles hipfftCreate(&fft.p2d[n]); hipfftCreate(&fft.invp2d[n]); // Disable auto allocation of workspace memory for cuFFT plans result = hipfftSetAutoAllocation(fft.p2d[n], 0); if ( result != HIPFFT_SUCCESS){ printf("CUFFT error: hipfftSetAutoAllocation failed on line %d, Error code %d\n", __LINE__, result); return; } result = hipfftSetAutoAllocation(fft.invp2d[n], 0); if ( result != HIPFFT_SUCCESS){ printf("CUFFT error: hipfftSetAutoAllocation failed on line %d, Error code %d\n", __LINE__, result); return; } // Plan Forward 2DFFT result = hipfftMakePlanMany(fft.p2d[n], rank, size, inembed, istride, idist, onembed, ostride, odist, HIPFFT_D2Z, batch, &fft.wsize_f[n]); if ( result != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: cufftPlanforward 2D failed"); printf(", Error code %d\n", result); return; } // Plan inverse 2DFFT result = hipfftMakePlanMany(fft.invp2d[n], rank, size, onembed, ostride, odist, inembed, istride, idist, HIPFFT_Z2D, batch, &fft.wsize_i[n]); if ( result != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: cufftPlanforward 2D failed"); printf(", Error code %d\n", result); return; } printf("The workspace size required for the forward transform is %lu.\n", fft.wsize_f[n]); // printf("The workspace size required for the inverse transform is %lu.\n", fft.wsize_i[n]); // Assuming that both workspaces are the same size (seems to be generally true), then the two workspaces can share an allocation - need to use maximum value here // Allocate workspace memory checkCudaErrors( hipMalloc(&fft.wspace[n], fft.wsize_f[n]) ); // Set cuFFT to use allocated workspace memory result = hipfftSetWorkArea(fft.p2d[n], fft.wspace[n]); if ( result != HIPFFT_SUCCESS){ printf("CUFFT error: ExecD2Z failed on line %d, Error code %d\n", __LINE__, result); return; } result = hipfftSetWorkArea(fft.invp2d[n], fft.wspace[n]); if ( result != HIPFFT_SUCCESS){ printf("CUFFT error: ExecD2Z failed on line %d, Error code %d\n", __LINE__, result); return; } } return; } 
void plan1dFFT(int nGPUs, fftinfo fft){ // This function plans a 1-dimensional FFT to operate on the X direction (for X-direction not contiguous in memory, offset by Z-dimension) int result; int n; for(n = 0; n<nGPUs; ++n){ hipSetDevice(n); //Create plan for cuFFT, set cuFFT parameters int rank = 1; // Dimensionality of the FFT - constant at rank 1 int size[] = {NX}; // size of each rank int inembed[] = {0}; // inembed measures distance between dimensions of data int onembed[] = {0}; // For complex to complex transform, input and output data have same dimensions int istride = NZ2; // istride is distance between consecutive elements int ostride = NZ2; int idist = 1; // idist is the total length of one signal int odist = 1; int batch = NZ2; // # of 1D FFTs to perform (assuming data has been transformed previously in the Z-Y directions) // Plan Forward 1DFFT result = hipfftPlanMany(&fft.p1d[n], rank, size, inembed, istride, idist, onembed, ostride, odist, HIPFFT_Z2Z, batch); if ( result != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: cufftPlanforward failed"); return; } } return; } void Execute1DFFT_Forward(hipfftHandle plan, int NY_per_GPU, hipfftDoubleComplex *f, hipfftDoubleComplex *fhat) { hipfftResult result; // Loop through each slab in the Y-direction // Perform forward FFT for(int i=0; i<NY_per_GPU; ++i){ result = hipfftExecZ2Z(plan, &f[i*NZ2*NX], &fhat[i*NZ2*NX], HIPFFT_FORWARD); if ( result != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: ExecZ2Z failed, error code %d\n",(int)result); return; } } return; } void Execute1DFFT_Inverse(hipfftHandle plan, int NY_per_GPU, hipfftDoubleComplex *fhat, hipfftDoubleComplex *f) { hipfftResult result; // Loop through each slab in the Y-direction // Perform forward FFT for(int i=0; i<NY_per_GPU; ++i){ result = hipfftExecZ2Z(plan, &fhat[i*NZ2*NX], &f[i*NZ2*NX], HIPFFT_BACKWARD); if ( result != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT error: ExecZ2Z failed, error code %d\n",(int)result); return; } } return; } void forwardTransform(fftinfo fft, gpuinfo gpu, hipfftDoubleReal **f ) { // Transform from physical to wave domain int RESULT, n; // Take FFT in Z and Y directions for(n = 0; n<gpu.nGPUs; ++n){ hipSetDevice(n); RESULT = hipfftExecD2Z(fft.p2d[n], f[n], (hipfftDoubleComplex *)f[n]); if ( RESULT != HIPFFT_SUCCESS){ printf("CUFFT error: ExecD2Z failed on line %d, Error code %d\n", __LINE__, RESULT); return; } // printf("Taking 2D forward FFT on GPU #%2d\n",n); } // Transpose X and Y dimensions transpose_xy_mgpu(gpu, (hipfftDoubleComplex **)f, fft.temp, fft.temp_reorder); // Take FFT in X direction (which has been transposed to what used to be the Y dimension) for(n = 0; n<gpu.nGPUs; ++n){ hipSetDevice(n); Execute1DFFT_Forward(fft.p1d[n], gpu.ny[n], fft.temp[n], (hipfftDoubleComplex *)f[n]); // printf("Taking 1D forward FFT on GPU #%2d\n",n); } // Results remain in transposed coordinates // printf("Forward Transform Completed...\n"); return; } void inverseTransform(fftinfo fft, gpuinfo gpu, hipfftDoubleComplex **f) { // Transform variables from wavespace to the physical domain int RESULT, n; // Data starts in transposed coordinates, x,y flipped // Take FFT in X direction (which has been transposed to what used to be the Y dimension) for(n = 0; n<gpu.nGPUs; ++n){ hipSetDevice(n); Execute1DFFT_Inverse(fft.p1d[n], gpu.ny[n], f[n], fft.temp[n]); // printf("Taking 1D inverse FFT on GPU #%2d\n",n); } // Transpose X and Y directions transpose_xy_mgpu(gpu, fft.temp, f, fft.temp_reorder); for(n = 0; n<gpu.nGPUs; ++n){ hipSetDevice(n); // Take inverse FFT 
in Z and Y direction RESULT = hipfftExecZ2D(fft.invp2d[n], f[n], (hipfftDoubleReal *)f[n]); if ( RESULT != HIPFFT_SUCCESS){ printf("CUFFT error: ExecZ2D failed on line %d, Error code %d\n", __LINE__, RESULT); return; } // printf("Taking 2D inverse FFT on GPU #%2d\n",n); } for(n = 0; n<gpu.nGPUs; ++n){ hipSetDevice(n); const dim3 blockSize(TX, TY, TZ); const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), divUp(NZ, TZ)); hipLaunchKernelGGL(( scaleKernel_mgpu), dim3(gridSize), dim3(blockSize), 0, 0, gpu.start_x[n], (hipfftDoubleReal *)f[n]); } // printf("Scaled Inverse Transform Completed...\n"); return; }
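// Sketch for illustration (not from the FFT source above): the 2-D D2Z/Z2D plans in
// plan2dFFT are executed in place (f[n] is reinterpreted as a complex pointer),
// which is why the real-side embedding is {NY, 2*NZ2} while the complex side is
// {NY, NZ2}: an in-place real-to-complex transform needs each contiguous row padded
// to 2*(NZ/2 + 1) reals so the NZ/2 + 1 complex outputs fit in the same buffer.
// A minimal allocation sketch for one x-slab under that layout; the function and
// variable names are illustrative, and NZ2 is assumed to be NZ/2 + 1 as in the
// usual R2C convention.
#include <cufft.h>
#include <cuda_runtime.h>

cufftDoubleReal* alloc_inplace_slab(int nx_local, int ny, int nz) {
  const int nz2 = nz / 2 + 1;  // complex elements per z-row after D2Z
  size_t bytes = sizeof(cufftDoubleComplex) * (size_t)nx_local * ny * nz2;
  cufftDoubleReal* f = NULL;   // real view of the same buffer: nx_local x ny x (2 * nz2)
  cudaMalloc((void**)&f, bytes);
  return f;                    // large enough for both the real and the complex view
}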
020d7d955bfc8d7247bf07d87976bcb188875db4.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cufft.h> #include <cuComplex.h> #include <helper_cuda.h> #include "dnsparams.h" #include "cudafuncs.h" #include "fftfuncs.h" //============================================================================== // Transpose algorithm //============================================================================== __global__ void organizeData(cufftDoubleComplex *in, cufftDoubleComplex *out, int N, int j) {// Function to grab non-contiguous chunks of data and make them contiguous const int k = blockIdx.x * blockDim.x + threadIdx.x; if(k >= NZ2) return; for(int i=0; i<N; ++i){ // printf("For thread %d, indexing begins at local index of %d, which maps to temp at location %d\n", k, (k+ NZ*j), k); out[k + i*NZ2] = in[k + NZ2*j + i*NY*NZ2]; } return; } __global__ void organizeData_coalesced(cufftDoubleComplex *in, cufftDoubleComplex *out, int N, int j) {// Function to grab non-contiguous chunks of data and make them contiguous const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= N) return; for(int k=0; k<NZ2; ++k){ // printf("For thread %d, indexing begins at local index of %d, which maps to temp at location %d\n", k, (k+ NZ*j), k); out[k + i*NZ2] = in[k + NZ2*j + i*NY*NZ2]; } return; } __global__ void organizeData_2d(cufftDoubleComplex *in, cufftDoubleComplex *out, int N, int j) {// Function to grab non-contiguous chunks of data and make them contiguous const int i = blockIdx.x * blockDim.x + threadIdx.x; const int k = blockIdx.y * blockDim.y + threadIdx.y; if(i >= N || k >= NZ2) return; out[k + i*NZ2] = in[k + NZ2*j + i*NY*NZ2]; return; } void transpose_xy_mgpu(gpuinfo gpu, cufftDoubleComplex **src, cufftDoubleComplex **dst, cufftDoubleComplex **temp) { // Transpose x and y directions (for a z-contiguous 1d array distributed across multiple GPUs) // This function loops through GPUs to do the transpose. Requires extra conversion to calculate the local index at the source location. 
// printf("Taking Transpose...\n"); int n, j, local_idx_dst, dstNum; for(j=0; j<NY; ++j){ for(n=0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); // Determine which GPU to send data to based on y-index, j dstNum = (j*gpu.nGPUs)/NY; const dim3 blockSize(TX, TZ, 1); const dim3 gridSize(divUp(NX, TX), divUp(NZ2, TZ), 1); // Open kernel that grabs all data organizeData_2d<<<gridSize,blockSize>>>(src[n], temp[n], gpu.nx[n], j); local_idx_dst = gpu.start_x[n]*NZ2 + (j - gpu.start_y[dstNum])*NZ2*NX; checkCudaErrors( cudaMemcpyAsync(&dst[dstNum][local_idx_dst], temp[n], sizeof( cufftDoubleComplex )*NZ2*gpu.nx[n], cudaMemcpyDefault) ); } } return; } //============================================================================== // FFT functions //============================================================================== void plan2dFFT(gpuinfo gpu, fftinfo fft){ // This function plans a 2-dimensional FFT to operate on the Z and Y directions (assumes Z-direction is contiguous in memory) int result; int n; for(n = 0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); //Create plan for 2-D cuFFT, set cuFFT parameters int rank = 2; int size[] = {NY,NZ}; int inembed[] = {NY,2*NZ2}; // inembed measures distance between dimensions of data int onembed[] = {NY,NZ2}; // Uses half the domain for a R2C transform int istride = 1; // istride is distance between consecutive elements int ostride = 1; int idist = NY*2*NZ2; // idist is the total length of one signal int odist = NY*NZ2; int batch = gpu.nx[n]; // # of 2D FFTs to perform // Create empty plan handles cufftCreate(&fft.p2d[n]); cufftCreate(&fft.invp2d[n]); // Disable auto allocation of workspace memory for cuFFT plans result = cufftSetAutoAllocation(fft.p2d[n], 0); if ( result != CUFFT_SUCCESS){ printf("CUFFT error: cufftSetAutoAllocation failed on line %d, Error code %d\n", __LINE__, result); return; } result = cufftSetAutoAllocation(fft.invp2d[n], 0); if ( result != CUFFT_SUCCESS){ printf("CUFFT error: cufftSetAutoAllocation failed on line %d, Error code %d\n", __LINE__, result); return; } // Plan Forward 2DFFT result = cufftMakePlanMany(fft.p2d[n], rank, size, inembed, istride, idist, onembed, ostride, odist, CUFFT_D2Z, batch, &fft.wsize_f[n]); if ( result != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: cufftPlanforward 2D failed"); printf(", Error code %d\n", result); return; } // Plan inverse 2DFFT result = cufftMakePlanMany(fft.invp2d[n], rank, size, onembed, ostride, odist, inembed, istride, idist, CUFFT_Z2D, batch, &fft.wsize_i[n]); if ( result != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: cufftPlanforward 2D failed"); printf(", Error code %d\n", result); return; } printf("The workspace size required for the forward transform is %lu.\n", fft.wsize_f[n]); // printf("The workspace size required for the inverse transform is %lu.\n", fft.wsize_i[n]); // Assuming that both workspaces are the same size (seems to be generally true), then the two workspaces can share an allocation - need to use maximum value here // Allocate workspace memory checkCudaErrors( cudaMalloc(&fft.wspace[n], fft.wsize_f[n]) ); // Set cuFFT to use allocated workspace memory result = cufftSetWorkArea(fft.p2d[n], fft.wspace[n]); if ( result != CUFFT_SUCCESS){ printf("CUFFT error: ExecD2Z failed on line %d, Error code %d\n", __LINE__, result); return; } result = cufftSetWorkArea(fft.invp2d[n], fft.wspace[n]); if ( result != CUFFT_SUCCESS){ printf("CUFFT error: ExecD2Z failed on line %d, Error code %d\n", __LINE__, result); return; } } return; } void plan1dFFT(int nGPUs, fftinfo fft){ // This 
function plans a 1-dimensional FFT to operate on the X direction (for X-direction not contiguous in memory, offset by Z-dimension) int result; int n; for(n = 0; n<nGPUs; ++n){ cudaSetDevice(n); //Create plan for cuFFT, set cuFFT parameters int rank = 1; // Dimensionality of the FFT - constant at rank 1 int size[] = {NX}; // size of each rank int inembed[] = {0}; // inembed measures distance between dimensions of data int onembed[] = {0}; // For complex to complex transform, input and output data have same dimensions int istride = NZ2; // istride is distance between consecutive elements int ostride = NZ2; int idist = 1; // idist is the total length of one signal int odist = 1; int batch = NZ2; // # of 1D FFTs to perform (assuming data has been transformed previously in the Z-Y directions) // Plan Forward 1DFFT result = cufftPlanMany(&fft.p1d[n], rank, size, inembed, istride, idist, onembed, ostride, odist, CUFFT_Z2Z, batch); if ( result != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: cufftPlanforward failed"); return; } } return; } void Execute1DFFT_Forward(cufftHandle plan, int NY_per_GPU, cufftDoubleComplex *f, cufftDoubleComplex *fhat) { cufftResult result; // Loop through each slab in the Y-direction // Perform forward FFT for(int i=0; i<NY_per_GPU; ++i){ result = cufftExecZ2Z(plan, &f[i*NZ2*NX], &fhat[i*NZ2*NX], CUFFT_FORWARD); if ( result != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: ExecZ2Z failed, error code %d\n",(int)result); return; } } return; } void Execute1DFFT_Inverse(cufftHandle plan, int NY_per_GPU, cufftDoubleComplex *fhat, cufftDoubleComplex *f) { cufftResult result; // Loop through each slab in the Y-direction // Perform forward FFT for(int i=0; i<NY_per_GPU; ++i){ result = cufftExecZ2Z(plan, &fhat[i*NZ2*NX], &f[i*NZ2*NX], CUFFT_INVERSE); if ( result != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT error: ExecZ2Z failed, error code %d\n",(int)result); return; } } return; } void forwardTransform(fftinfo fft, gpuinfo gpu, cufftDoubleReal **f ) { // Transform from physical to wave domain int RESULT, n; // Take FFT in Z and Y directions for(n = 0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); RESULT = cufftExecD2Z(fft.p2d[n], f[n], (cufftDoubleComplex *)f[n]); if ( RESULT != CUFFT_SUCCESS){ printf("CUFFT error: ExecD2Z failed on line %d, Error code %d\n", __LINE__, RESULT); return; } // printf("Taking 2D forward FFT on GPU #%2d\n",n); } // Transpose X and Y dimensions transpose_xy_mgpu(gpu, (cufftDoubleComplex **)f, fft.temp, fft.temp_reorder); // Take FFT in X direction (which has been transposed to what used to be the Y dimension) for(n = 0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); Execute1DFFT_Forward(fft.p1d[n], gpu.ny[n], fft.temp[n], (cufftDoubleComplex *)f[n]); // printf("Taking 1D forward FFT on GPU #%2d\n",n); } // Results remain in transposed coordinates // printf("Forward Transform Completed...\n"); return; } void inverseTransform(fftinfo fft, gpuinfo gpu, cufftDoubleComplex **f) { // Transform variables from wavespace to the physical domain int RESULT, n; // Data starts in transposed coordinates, x,y flipped // Take FFT in X direction (which has been transposed to what used to be the Y dimension) for(n = 0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); Execute1DFFT_Inverse(fft.p1d[n], gpu.ny[n], f[n], fft.temp[n]); // printf("Taking 1D inverse FFT on GPU #%2d\n",n); } // Transpose X and Y directions transpose_xy_mgpu(gpu, fft.temp, f, fft.temp_reorder); for(n = 0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); // Take inverse FFT in Z and Y direction RESULT = cufftExecZ2D(fft.invp2d[n], f[n], 
(cufftDoubleReal *)f[n]); if ( RESULT != CUFFT_SUCCESS){ printf("CUFFT error: ExecZ2D failed on line %d, Error code %d\n", __LINE__, RESULT); return; } // printf("Taking 2D inverse FFT on GPU #%2d\n",n); } for(n = 0; n<gpu.nGPUs; ++n){ cudaSetDevice(n); const dim3 blockSize(TX, TY, TZ); const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), divUp(NZ, TZ)); scaleKernel_mgpu<<<gridSize, blockSize>>>(gpu.start_x[n], (cufftDoubleReal *)f[n]); } // printf("Scaled Inverse Transform Completed...\n"); return; }
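// Sketch for illustration (not from the FFT source above): plan1dFFT above batches
// NZ2 strided 1-D transforms per y-plane; consecutive x samples of one signal are
// NZ2 complex elements apart (istride = NZ2) while neighbouring signals in the
// batch start one element apart (idist = 1). A standalone sketch of such a strided,
// batched plan; the function and parameter names are illustrative, and error
// checking is omitted.
#include <cufft.h>

cufftHandle make_strided_z2z_plan(int n, int stride, int howmany) {
  cufftHandle plan;
  int size[1]  = { n };
  int embed[1] = { n };  // non-NULL, so cuFFT honours the stride/dist layout below
  cufftPlanMany(&plan, 1 /*rank*/, size,
                embed, stride, 1 /*idist*/,
                embed, stride, 1 /*odist*/,
                CUFFT_Z2Z, howmany);
  return plan;
}
// Executed once per y-plane, as forwardTransform does above:
//   cufftExecZ2Z(plan, in + i * stride * n, out + i * stride * n, CUFFT_FORWARD);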
d472d47aef1dbd591c59adc7f44ca6bac8000a36.hip
// !!! This is a file automatically generated by hipify!!! /* * 2D Heat Diffusion * * In this homework you will be implementing a finite difference 2D-Heat Diffusion Solver * in three different ways, in particular with and without using shared memory. * You will implement stencils of orders 2, 4 and 8. A reference CPU implementation * has been provided. You should keep all existing classes, method names, function names, * and variables as is. * * The simParams and Grid classes are provided for convenience. The simParams class will * load a file containing all the information needed for the simulation and calculate the * maximum stable CFL number. The Grid will set up a grid with the appropriate boundary and * initial conditions. * * Some general notes about declaring N-dimensional arrays. * You may have seen / been taught to do this in the past: * int **A = (int **)malloc(numRows * sizeof(int *)); * for (int r = 0; r < numRows; ++r) * A[r] = (int *)malloc(numCols * sizeof(int)); * * so that you can then access elements of A with the notation A[row][col], which involves dereferencing * two pointers. This is a *really bad* way to represent 2D arrays for a couple of reasons. * * 1) For a NxN array, it does N+1 mallocs which is slow. And on the gpu setting up this data * structure is inconvenient. But you should know how to do it. * 2) There is absolutely no guarantee that different rows are even remotely close in memory; * subsequent rows could allocated on complete opposite sides of the address space * which leads to terrible cache behavior. * 3) The double indirection leads to really high memory latency. To access location A[i][j], * first we have to make a trip to memory to fetch A[i], and once we get that pointer, we have to make another * trip to memory to fetch (A[i])[j]. It would be far better if we only had to make one trip to * memory. This is especially important on the gpu. * * The *better way* - just allocate one 1-D array of size N*N. Then just calculate the correct offset - * A[i][j] = *(A + i * numCols + j). There is only one allocation, adjacent rows are as close as they can be * and we only make one trip to memory to fetch a value. The grid implements this storage scheme * "under the hood" and overloads the () operator to allow the more familiar (x, y) notation. * * For the GPU code in this exercise you don't need to worry about trying to be fancy and overload an operator * or use some #define macro magic to mimic the same behavior - you can just do the raw addressing calculations. * * For the first part of the homework where you will implement the kernels without using shared memory * each thread should compute exactly one output. * * For the second part with shared memory - it is recommended that you use 1D blocks since the ideal * implementation will have each thread outputting more than 1 value and the addressing arithmetic * is actually easier with 1D blocks. 
*/ #include <ostream> #include <iostream> #include <iomanip> #include <limits> #include <fstream> #include <string> #include <fstream> #include <cmath> #include <cstdlib> #include <cassert> #include <unistd.h> #include "mp1-util.h" #include "simParams.h" #include "Grid.h" #include "gpuStencil.hip" using std::setw; using std::setprecision; using std::cout; using std::endl; void updateBCsOnly(Grid& grid, Grid& prev, const simParams& params) { const int borderSize = params.order() / 2; const int gx = params.gx(); const int gy = params.gy(); const float dt = params.dt(); const float scaling_factor = exp(-2 * dt); assert(scaling_factor > 0); const int upper_border_x = gx - borderSize; const int upper_border_y = gy - borderSize; for(int i = 0; i < gx; ++i) { for(int j = 0; j < borderSize; ++j) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } for(int j = upper_border_y; j < gy; ++j) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } } for(int j = borderSize; j < upper_border_y; ++j) { for(int i = 0; i < borderSize; ++i) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } for(int i = upper_border_x; i < gx; ++i) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } } /* // Testing that the boundary conditions were correctly applied for (int i = 0; i < gx; ++i) for (int j = 0; j < gy; ++j) if (i<borderSize || i >= upper_border_x || j<borderSize || j >= upper_border_y) assert(grid.hGrid_[i + gx * j] == prev.hGrid_[i + gx * j] * scaling_factor); */ } void initGrid(Grid& grid, const simParams& params) { const int gx = params.gx(); const int gy = params.gy(); const double dx = params.dx(); const double dy = params.dy(); for(int i = 0; i < gx; ++i) { for(int j = 0; j < gy; ++j) { grid.hGrid_.at(i + gx * j) = sin(i * dx) * sin(j * dy); } } grid.toGPU(); } template<int order> inline float stencil(float* curr_grid, int gx, int x, int y, float xcfl, float ycfl) { if(order == 2) { return curr_grid[x + gx * y] + xcfl * (curr_grid[x+1 + gx * y] + curr_grid[x-1 + gx * y] - 2 * curr_grid[x + gx * y]) + ycfl * (curr_grid[x + gx *(y+1)] + curr_grid[x + gx *(y-1)] - 2 * curr_grid[x + gx * y]); } else if(order == 4) { return curr_grid[x + gx * y] + xcfl * (-curr_grid[x+2 + gx * y] + 16 * curr_grid[x+1 + gx * y] - 30 * curr_grid[x + gx * y] + 16 * curr_grid[x-1 + gx * y] - curr_grid[x-2 + gx * y]) + ycfl * (-curr_grid[x + gx * (y+2)] + 16 * curr_grid[x + gx * (y+1)] - 30 * curr_grid[x + gx * y] + 16 * curr_grid[x + gx * (y-1)] - curr_grid[x + gx * (y-2)]); } else if(order == 8) { return curr_grid[x + gx * y] + xcfl * (-9*curr_grid[x+4 + gx * y] + 128 * curr_grid[x+3 + gx * y] - 1008 * curr_grid[x+2 + gx * y] + 8064 * curr_grid[x+1 + gx * y] - 14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x-1 + gx * y] - 1008 * curr_grid[x-2 + gx * y] + 128 * curr_grid[x-3 + gx * y] -9 * curr_grid[x-4 + gx * y]) + ycfl * (-9*curr_grid[x + gx * (y+4)] + 128 * curr_grid[x + gx * (y+3)] - 1008 * curr_grid[x + gx * (y+2)] + 8064 * curr_grid[x + gx * (y+1)] - 14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x + gx * (y-1)] - 1008 * curr_grid[x + gx * (y-2)] + 128 * curr_grid[x + gx * (y-3)] - 9 * curr_grid[x + gx * (y-4)]); } else { return std::numeric_limits<float>::quiet_NaN(); } } double cpuComputation(Grid& curr_grid, const simParams& params) { Grid next_grid(curr_grid); event_pair timer; start_timer(&timer); float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int borderSize = 
params.borderSize(); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only updateBCsOnly(curr_grid, next_grid, params); // apply stencil if(params.order() == 2) { for(int y = borderSize; y < ny + borderSize; ++y) { for(int x = borderSize; x < nx + borderSize; ++x) { next_grid.hGrid_[x + gx * y] = stencil<2>(curr_grid.hGrid_.data(), gx, x, y, xcfl, ycfl); } } } else if(params.order() == 4) { for(int y = borderSize; y < ny + borderSize; ++y) { for(int x = borderSize; x < nx + borderSize; ++x) { next_grid.hGrid_[x + gx * y] = stencil<4>(curr_grid.hGrid_.data(), gx, x, y, xcfl, ycfl); } } } else if(params.order() == 8) { for(int y = borderSize; y < ny + borderSize; ++y) { for(int x = borderSize; x < nx + borderSize; ++x) { next_grid.hGrid_[x + gx * y] = stencil<8>(curr_grid.hGrid_.data(), gx, x, y, xcfl, ycfl); } } } Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); } int checkErrors(const Grid& ref_grid, const Grid& gpu_grid, const simParams& params, std::string filename, std::vector<double>& errors) { //check that we got the same answer std::ofstream ofs(filename.c_str()); int error = 0; double l2ref = 0; double linf = 0; double curr = 0; double l2err = 0; for(int x = 0; x < params.gx(); ++x) { for(int y = 0; y < params.gy(); ++y) { if(!AlmostEqualUlps(ref_grid.hGrid_[y * params.gx() + x], gpu_grid.hGrid_[x + params.gx() * y], 512)) { ofs << "Mismatch at pos (" << x << ", " << y << ") cpu: " << ref_grid.hGrid_[x + params.gx() * y] << " gpu: " << gpu_grid.hGrid_[y * params.gx() + x] << endl; ++error; } l2ref += ref_grid.hGrid_[y * params.gx() + x] * ref_grid.hGrid_[y * params.gx() + x]; l2err += (ref_grid.hGrid_[y * params.gx() + x] - gpu_grid.hGrid_[x + params.gx() * y]) * (ref_grid.hGrid_[y * params.gx() + x] - gpu_grid.hGrid_[x + params.gx() * y]); if(ref_grid.hGrid_[y * params.gx() + x] != 0) { curr = abs((ref_grid.hGrid_[y * params.gx() + x] - gpu_grid.hGrid_[x + params.gx() * y]) / (ref_grid.hGrid_[y * params.gx() + x])); } if(curr > linf) { linf = curr; } } } if(error) std::cerr << "There were " << error << " total locations where there was a difference between the cpu and gpu" << endl; errors.push_back(l2ref); errors.push_back(linf); errors.push_back(l2err); ofs.close(); return error; } void PrintErrors(const std::vector<double>& errorsg, const std::vector<double>& errorsb, const std::vector<double>& errorss) { cout << endl; cout << setw(15) << " " << setw(15) << "L2Ref" << setw(15) << "LInf" << setw( 15) << "L2Err" << endl; if(errorsg.size() > 0) { cout << setw(15) << "Global" << setw(15) << setprecision(6) << errorsg[0] << setw(15) << errorsg[1] << setw(15) << sqrt(errorsg[2] / errorsg[0]) << endl; } if(errorsb.size() > 0) { cout << setw(15) << "Block" << setw(15) << setprecision(6) << errorsb[0] << setw(15) << errorsb[1] << setw(15) << sqrt(errorsb[2] / errorsb[0]) << endl; } if(errorss.size() > 0) { cout << setw(15) << "Shared" << setw(15) << setprecision(6) << errorss[0] << setw(15) << errorss[1] << setw(15) << sqrt(errorss[2] / errorss[0]) << endl; } cout << endl; } int main(int argc, char* argv[]) { bool doGlobal = false; bool doShared = false; bool doBlock = false; std::string helpString = "Usage:\n./heat [-gsb]" "\n-g\tPerform the calculation using global memory" "\n-s\tPerform the calculation using shared memory" "\n-b\tPerform the calculation using block memory" "\n\nBoth options can be passed\n"; if(argc == 1) { std::cerr << helpString; exit(1); } { int opt; while((opt = getopt(argc, argv, "gsb")) != -1) { switch(opt) { case 'g': 
doGlobal = true; break; case 's': doShared = true; break; case 'b': doBlock = true; break; default: std::cerr << helpString; exit(1); }; } } //load the parameters, setup the grid with the initial and boundary conditions simParams params("params.in"); Grid grid(params.gx(), params.gy()); initGrid(grid, params); //for debugging, you may want to uncomment this line // grid.saveStateToFile("init"); //save our initial state, useful for making sure we got setup and BCs right cout << "Order: " << params.order() << endl; cout << setw(15) << " " << setw(15) << "time (ms)" << setw( 15) << "GBytes/sec" << endl; //compute our reference solution double elapsed = cpuComputation(grid, params); //for debugging, you may want to uncomment the following line // grid.saveStateToFile("final_cpu"); //Print statistics cout << setw(15) << "CPU" << setw(15) << setprecision(6) << elapsed << setw(15) << params.calcBytes() / (elapsed / 1E3) / 1E9 << endl; std::vector<double> errorsb, errorsg, errorss; // Use global memory if(doGlobal) { Grid gpuGrid(grid); // Set up a grid with same dimension as grid initGrid(gpuGrid, params); // Initialize the grid elapsed = gpuComputation(gpuGrid, params); // Calculation on the GPU cout << setw(15) << "Global" << setw(15) << setprecision(6) << elapsed << setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl; // Copy back the solution gpuGrid.fromGPU(); // Check for errors checkErrors(grid, gpuGrid, params, "globalErrors.txt", errorsg); //for debugging, save data to file // gpuGrid.saveStateToFile("final_gpu_global"); } // This kernel iterates inside a large sub-domain if(doBlock) { Grid gpuGrid(grid); initGrid(gpuGrid, params); elapsed = gpuComputationLoop(gpuGrid, params); cout << setw(15) << "Block" << setw(15) << setprecision(6) << elapsed << setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl; gpuGrid.fromGPU(); checkErrors(grid, gpuGrid, params, "globalErrors.txt", errorsb); // gpuGrid.saveStateToFile("final_gpu_block"); } // This kernel uses shared memory if(doShared) { Grid gpuGrid(grid); initGrid(gpuGrid, params); if(params.order() == 2) { elapsed = gpuComputationShared<2>(gpuGrid, params); } else if(params.order() == 4) { elapsed = gpuComputationShared<4>(gpuGrid, params); } else if(params.order() == 8) { elapsed = gpuComputationShared<8>(gpuGrid, params); } cout << setw(15) << "Shared" << setw(15) << setprecision(6) << elapsed << setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl; gpuGrid.fromGPU(); checkErrors(grid, gpuGrid, params, "sharedErrors.txt", errorss); // gpuGrid.saveStateToFile("final_gpu_shared"); } PrintErrors(errorsg, errorsb, errorss); return 0; }
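// Sketch for illustration (not from the homework source above, and not the
// gpuStencil implementation the assignment asks for): the header comment recommends
// one row-major 1-D allocation with A[i][j] addressed as A[i * numCols + j]. One
// possible shape of a global-memory, one-output-per-thread order-2 update on that
// layout, matching the coefficients of the CPU stencil<2>() above; xcfl, ycfl, gx,
// nx and ny have the same meaning as in cpuComputation.
__global__ void stencil2_global_sketch(float* next, const float* curr,
                                       int gx, int nx, int ny,
                                       float xcfl, float ycfl) {
  int x = blockIdx.x * blockDim.x + threadIdx.x + 1;  // +1 skips the order/2 border
  int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
  if (x < nx + 1 && y < ny + 1) {
    int idx = x + gx * y;  // row-major offset, as used by hGrid_ above
    next[idx] = curr[idx]
              + xcfl * (curr[idx + 1]  + curr[idx - 1]  - 2.f * curr[idx])
              + ycfl * (curr[idx + gx] + curr[idx - gx] - 2.f * curr[idx]);
  }
}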
d472d47aef1dbd591c59adc7f44ca6bac8000a36.cu
/* * 2D Heat Diffusion * * In this homework you will be implementing a finite difference 2D-Heat Diffusion Solver * in three different ways, in particular with and without using shared memory. * You will implement stencils of orders 2, 4 and 8. A reference CPU implementation * has been provided. You should keep all existing classes, method names, function names, * and variables as is. * * The simParams and Grid classes are provided for convenience. The simParams class will * load a file containing all the information needed for the simulation and calculate the * maximum stable CFL number. The Grid will set up a grid with the appropriate boundary and * initial conditions. * * Some general notes about declaring N-dimensional arrays. * You may have seen / been taught to do this in the past: * int **A = (int **)malloc(numRows * sizeof(int *)); * for (int r = 0; r < numRows; ++r) * A[r] = (int *)malloc(numCols * sizeof(int)); * * so that you can then access elements of A with the notation A[row][col], which involves dereferencing * two pointers. This is a *really bad* way to represent 2D arrays for a couple of reasons. * * 1) For a NxN array, it does N+1 mallocs which is slow. And on the gpu setting up this data * structure is inconvenient. But you should know how to do it. * 2) There is absolutely no guarantee that different rows are even remotely close in memory; * subsequent rows could allocated on complete opposite sides of the address space * which leads to terrible cache behavior. * 3) The double indirection leads to really high memory latency. To access location A[i][j], * first we have to make a trip to memory to fetch A[i], and once we get that pointer, we have to make another * trip to memory to fetch (A[i])[j]. It would be far better if we only had to make one trip to * memory. This is especially important on the gpu. * * The *better way* - just allocate one 1-D array of size N*N. Then just calculate the correct offset - * A[i][j] = *(A + i * numCols + j). There is only one allocation, adjacent rows are as close as they can be * and we only make one trip to memory to fetch a value. The grid implements this storage scheme * "under the hood" and overloads the () operator to allow the more familiar (x, y) notation. * * For the GPU code in this exercise you don't need to worry about trying to be fancy and overload an operator * or use some #define macro magic to mimic the same behavior - you can just do the raw addressing calculations. * * For the first part of the homework where you will implement the kernels without using shared memory * each thread should compute exactly one output. * * For the second part with shared memory - it is recommended that you use 1D blocks since the ideal * implementation will have each thread outputting more than 1 value and the addressing arithmetic * is actually easier with 1D blocks. 
*/ #include <ostream> #include <iostream> #include <iomanip> #include <limits> #include <fstream> #include <string> #include <fstream> #include <cmath> #include <cstdlib> #include <cassert> #include <unistd.h> #include "mp1-util.h" #include "simParams.h" #include "Grid.h" #include "gpuStencil.cu" using std::setw; using std::setprecision; using std::cout; using std::endl; void updateBCsOnly(Grid& grid, Grid& prev, const simParams& params) { const int borderSize = params.order() / 2; const int gx = params.gx(); const int gy = params.gy(); const float dt = params.dt(); const float scaling_factor = exp(-2 * dt); assert(scaling_factor > 0); const int upper_border_x = gx - borderSize; const int upper_border_y = gy - borderSize; for(int i = 0; i < gx; ++i) { for(int j = 0; j < borderSize; ++j) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } for(int j = upper_border_y; j < gy; ++j) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } } for(int j = borderSize; j < upper_border_y; ++j) { for(int i = 0; i < borderSize; ++i) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } for(int i = upper_border_x; i < gx; ++i) { grid.hGrid_[i + gx * j] = prev.hGrid_[i + gx * j] * scaling_factor; } } /* // Testing that the boundary conditions were correctly applied for (int i = 0; i < gx; ++i) for (int j = 0; j < gy; ++j) if (i<borderSize || i >= upper_border_x || j<borderSize || j >= upper_border_y) assert(grid.hGrid_[i + gx * j] == prev.hGrid_[i + gx * j] * scaling_factor); */ } void initGrid(Grid& grid, const simParams& params) { const int gx = params.gx(); const int gy = params.gy(); const double dx = params.dx(); const double dy = params.dy(); for(int i = 0; i < gx; ++i) { for(int j = 0; j < gy; ++j) { grid.hGrid_.at(i + gx * j) = sin(i * dx) * sin(j * dy); } } grid.toGPU(); } template<int order> inline float stencil(float* curr_grid, int gx, int x, int y, float xcfl, float ycfl) { if(order == 2) { return curr_grid[x + gx * y] + xcfl * (curr_grid[x+1 + gx * y] + curr_grid[x-1 + gx * y] - 2 * curr_grid[x + gx * y]) + ycfl * (curr_grid[x + gx *(y+1)] + curr_grid[x + gx *(y-1)] - 2 * curr_grid[x + gx * y]); } else if(order == 4) { return curr_grid[x + gx * y] + xcfl * (-curr_grid[x+2 + gx * y] + 16 * curr_grid[x+1 + gx * y] - 30 * curr_grid[x + gx * y] + 16 * curr_grid[x-1 + gx * y] - curr_grid[x-2 + gx * y]) + ycfl * (-curr_grid[x + gx * (y+2)] + 16 * curr_grid[x + gx * (y+1)] - 30 * curr_grid[x + gx * y] + 16 * curr_grid[x + gx * (y-1)] - curr_grid[x + gx * (y-2)]); } else if(order == 8) { return curr_grid[x + gx * y] + xcfl * (-9*curr_grid[x+4 + gx * y] + 128 * curr_grid[x+3 + gx * y] - 1008 * curr_grid[x+2 + gx * y] + 8064 * curr_grid[x+1 + gx * y] - 14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x-1 + gx * y] - 1008 * curr_grid[x-2 + gx * y] + 128 * curr_grid[x-3 + gx * y] -9 * curr_grid[x-4 + gx * y]) + ycfl * (-9*curr_grid[x + gx * (y+4)] + 128 * curr_grid[x + gx * (y+3)] - 1008 * curr_grid[x + gx * (y+2)] + 8064 * curr_grid[x + gx * (y+1)] - 14350 * curr_grid[x + gx * y] + 8064 * curr_grid[x + gx * (y-1)] - 1008 * curr_grid[x + gx * (y-2)] + 128 * curr_grid[x + gx * (y-3)] - 9 * curr_grid[x + gx * (y-4)]); } else { return std::numeric_limits<float>::quiet_NaN(); } } double cpuComputation(Grid& curr_grid, const simParams& params) { Grid next_grid(curr_grid); event_pair timer; start_timer(&timer); float xcfl = params.xcfl(); float ycfl = params.ycfl(); int nx = params.nx(); int ny = params.ny(); int gx = params.gx(); int borderSize = 
params.borderSize(); for(int i = 0; i < params.iters(); ++i) { // update the values on the boundary only updateBCsOnly(curr_grid, next_grid, params); // apply stencil if(params.order() == 2) { for(int y = borderSize; y < ny + borderSize; ++y) { for(int x = borderSize; x < nx + borderSize; ++x) { next_grid.hGrid_[x + gx * y] = stencil<2>(curr_grid.hGrid_.data(), gx, x, y, xcfl, ycfl); } } } else if(params.order() == 4) { for(int y = borderSize; y < ny + borderSize; ++y) { for(int x = borderSize; x < nx + borderSize; ++x) { next_grid.hGrid_[x + gx * y] = stencil<4>(curr_grid.hGrid_.data(), gx, x, y, xcfl, ycfl); } } } else if(params.order() == 8) { for(int y = borderSize; y < ny + borderSize; ++y) { for(int x = borderSize; x < nx + borderSize; ++x) { next_grid.hGrid_[x + gx * y] = stencil<8>(curr_grid.hGrid_.data(), gx, x, y, xcfl, ycfl); } } } Grid::swap(curr_grid, next_grid); } return stop_timer(&timer); } int checkErrors(const Grid& ref_grid, const Grid& gpu_grid, const simParams& params, std::string filename, std::vector<double>& errors) { //check that we got the same answer std::ofstream ofs(filename.c_str()); int error = 0; double l2ref = 0; double linf = 0; double curr = 0; double l2err = 0; for(int x = 0; x < params.gx(); ++x) { for(int y = 0; y < params.gy(); ++y) { if(!AlmostEqualUlps(ref_grid.hGrid_[y * params.gx() + x], gpu_grid.hGrid_[x + params.gx() * y], 512)) { ofs << "Mismatch at pos (" << x << ", " << y << ") cpu: " << ref_grid.hGrid_[x + params.gx() * y] << " gpu: " << gpu_grid.hGrid_[y * params.gx() + x] << endl; ++error; } l2ref += ref_grid.hGrid_[y * params.gx() + x] * ref_grid.hGrid_[y * params.gx() + x]; l2err += (ref_grid.hGrid_[y * params.gx() + x] - gpu_grid.hGrid_[x + params.gx() * y]) * (ref_grid.hGrid_[y * params.gx() + x] - gpu_grid.hGrid_[x + params.gx() * y]); if(ref_grid.hGrid_[y * params.gx() + x] != 0) { curr = abs((ref_grid.hGrid_[y * params.gx() + x] - gpu_grid.hGrid_[x + params.gx() * y]) / (ref_grid.hGrid_[y * params.gx() + x])); } if(curr > linf) { linf = curr; } } } if(error) std::cerr << "There were " << error << " total locations where there was a difference between the cpu and gpu" << endl; errors.push_back(l2ref); errors.push_back(linf); errors.push_back(l2err); ofs.close(); return error; } void PrintErrors(const std::vector<double>& errorsg, const std::vector<double>& errorsb, const std::vector<double>& errorss) { cout << endl; cout << setw(15) << " " << setw(15) << "L2Ref" << setw(15) << "LInf" << setw( 15) << "L2Err" << endl; if(errorsg.size() > 0) { cout << setw(15) << "Global" << setw(15) << setprecision(6) << errorsg[0] << setw(15) << errorsg[1] << setw(15) << sqrt(errorsg[2] / errorsg[0]) << endl; } if(errorsb.size() > 0) { cout << setw(15) << "Block" << setw(15) << setprecision(6) << errorsb[0] << setw(15) << errorsb[1] << setw(15) << sqrt(errorsb[2] / errorsb[0]) << endl; } if(errorss.size() > 0) { cout << setw(15) << "Shared" << setw(15) << setprecision(6) << errorss[0] << setw(15) << errorss[1] << setw(15) << sqrt(errorss[2] / errorss[0]) << endl; } cout << endl; } int main(int argc, char* argv[]) { bool doGlobal = false; bool doShared = false; bool doBlock = false; std::string helpString = "Usage:\n./heat [-gsb]" "\n-g\tPerform the calculation using global memory" "\n-s\tPerform the calculation using shared memory" "\n-b\tPerform the calculation using block memory" "\n\nBoth options can be passed\n"; if(argc == 1) { std::cerr << helpString; exit(1); } { int opt; while((opt = getopt(argc, argv, "gsb")) != -1) { switch(opt) { case 'g': 
doGlobal = true; break; case 's': doShared = true; break; case 'b': doBlock = true; break; default: std::cerr << helpString; exit(1); }; } } //load the parameters, setup the grid with the initial and boundary conditions simParams params("params.in"); Grid grid(params.gx(), params.gy()); initGrid(grid, params); //for debugging, you may want to uncomment this line // grid.saveStateToFile("init"); //save our initial state, useful for making sure we got setup and BCs right cout << "Order: " << params.order() << endl; cout << setw(15) << " " << setw(15) << "time (ms)" << setw( 15) << "GBytes/sec" << endl; //compute our reference solution double elapsed = cpuComputation(grid, params); //for debugging, you may want to uncomment the following line // grid.saveStateToFile("final_cpu"); //Print statistics cout << setw(15) << "CPU" << setw(15) << setprecision(6) << elapsed << setw(15) << params.calcBytes() / (elapsed / 1E3) / 1E9 << endl; std::vector<double> errorsb, errorsg, errorss; // Use global memory if(doGlobal) { Grid gpuGrid(grid); // Set up a grid with same dimension as grid initGrid(gpuGrid, params); // Initialize the grid elapsed = gpuComputation(gpuGrid, params); // Calculation on the GPU cout << setw(15) << "Global" << setw(15) << setprecision(6) << elapsed << setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl; // Copy back the solution gpuGrid.fromGPU(); // Check for errors checkErrors(grid, gpuGrid, params, "globalErrors.txt", errorsg); //for debugging, save data to file // gpuGrid.saveStateToFile("final_gpu_global"); } // This kernel iterates inside a large sub-domain if(doBlock) { Grid gpuGrid(grid); initGrid(gpuGrid, params); elapsed = gpuComputationLoop(gpuGrid, params); cout << setw(15) << "Block" << setw(15) << setprecision(6) << elapsed << setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl; gpuGrid.fromGPU(); checkErrors(grid, gpuGrid, params, "globalErrors.txt", errorsb); // gpuGrid.saveStateToFile("final_gpu_block"); } // This kernel uses shared memory if(doShared) { Grid gpuGrid(grid); initGrid(gpuGrid, params); if(params.order() == 2) { elapsed = gpuComputationShared<2>(gpuGrid, params); } else if(params.order() == 4) { elapsed = gpuComputationShared<4>(gpuGrid, params); } else if(params.order() == 8) { elapsed = gpuComputationShared<8>(gpuGrid, params); } cout << setw(15) << "Shared" << setw(15) << setprecision(6) << elapsed << setw(15) << (params.calcBytes() / (elapsed / 1E3)) / 1E9 << endl; gpuGrid.fromGPU(); checkErrors(grid, gpuGrid, params, "sharedErrors.txt", errorss); // gpuGrid.saveStateToFile("final_gpu_shared"); } PrintErrors(errorsg, errorsb, errorss); return 0; }
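// ---- Illustrative sketch only; not taken from the heat-diffusion file pair above ----
// The long header comment in that file argues for storing the 2D grid as a single flat
// array indexed as A[i * numCols + j], and for a first version in which each thread
// writes exactly one output from global memory. The kernel below is a minimal sketch of
// that idea for the order-2 stencil only; the parameter names (gx, nx, ny, borderSize,
// xcfl, ycfl) mirror the file's simParams accessors, but the kernel itself is an
// assumption, not the assignment's reference solution.
__global__ void stencil2_global_sketch(const float* curr, float* next,
                                       int gx, int nx, int ny, int borderSize,
                                       float xcfl, float ycfl) {
    // one thread per interior grid point; launch with a 2D grid covering nx-by-ny points
    int x = blockIdx.x * blockDim.x + threadIdx.x + borderSize;
    int y = blockIdx.y * blockDim.y + threadIdx.y + borderSize;
    if (x < nx + borderSize && y < ny + borderSize) {
        int idx = x + gx * y;  // flat row-major index, exactly as the CPU stencil<2> uses
        next[idx] = curr[idx]
                  + xcfl * (curr[idx + 1]  + curr[idx - 1]  - 2.f * curr[idx])
                  + ycfl * (curr[idx + gx] + curr[idx - gx] - 2.f * curr[idx]);
    }
}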
c4f1af6c01bca9a03640e90e106ef2e2b41c78ac.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/strings/utilities.h> #include <cudf/column/column_factories.hpp> #include <cudf/copying.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cstring> #include <vector> struct StringsFactoriesTest : public cudf::test::BaseFixture { }; TEST_F(StringsFactoriesTest, CreateColumnFromPair) { std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog", "the fat cat lays next to the other accnted cat", "a slow moving turtl cannot catch the bird", "which can be composd together to form a more complete", "th result does not include the value in the sum in", "", nullptr, "absent stop words"}; cudf::size_type memsize = 0; for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr) memsize += *itr ? (cudf::size_type)strlen(*itr) : 0; cudf::size_type count = (cudf::size_type)h_test_strings.size(); thrust::host_vector<char> h_buffer(memsize); thrust::device_vector<char> d_buffer(memsize); thrust::host_vector<thrust::pair<const char*, cudf::size_type>> strings(count); thrust::host_vector<cudf::size_type> h_offsets(count + 1); cudf::size_type offset = 0; cudf::size_type nulls = 0; h_offsets[0] = 0; for (cudf::size_type idx = 0; idx < count; ++idx) { const char* str = h_test_strings[idx]; if (!str) { strings[idx] = thrust::pair<const char*, cudf::size_type>{nullptr, 0}; nulls++; } else { cudf::size_type length = (cudf::size_type)strlen(str); memcpy(h_buffer.data() + offset, str, length); strings[idx] = thrust::pair<const char*, cudf::size_type>{d_buffer.data().get() + offset, length}; offset += length; } h_offsets[idx + 1] = offset; } rmm::device_vector<thrust::pair<const char*, cudf::size_type>> d_strings(strings); CUDA_TRY(hipMemcpy(d_buffer.data().get(), h_buffer.data(), memsize, hipMemcpyHostToDevice)); auto column = cudf::make_strings_column(d_strings); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING}); EXPECT_EQ(column->null_count(), nulls); if (nulls) { EXPECT_TRUE(column->nullable()); EXPECT_TRUE(column->has_nulls()); } EXPECT_EQ(2, column->num_children()); cudf::strings_column_view strings_view(column->view()); EXPECT_EQ(strings_view.size(), count); EXPECT_EQ(strings_view.offsets().size(), count + 1); EXPECT_EQ(strings_view.chars().size(), memsize); // check string data auto strings_data = cudf::strings::create_offsets(strings_view); thrust::host_vector<char> h_chars_data(strings_data.first); thrust::host_vector<cudf::size_type> h_offsets_data(strings_data.second); EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0); EXPECT_EQ( memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0); } 
TEST_F(StringsFactoriesTest, CreateColumnFromOffsets) { std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog", "the fat cat lays next to the other accnted cat", "a slow moving turtl cannot catch the bird", "which can be composd together to form a more complete", "th result does not include the value in the sum in", "", nullptr, "absent stop words"}; cudf::size_type memsize = 0; for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr) memsize += *itr ? (cudf::size_type)strlen(*itr) : 0; cudf::size_type count = (cudf::size_type)h_test_strings.size(); std::vector<char> h_buffer(memsize); std::vector<cudf::size_type> h_offsets(count + 1); cudf::size_type offset = 0; h_offsets[0] = offset; cudf::bitmask_type h_null_mask = 0; cudf::size_type null_count = 0; for (cudf::size_type idx = 0; idx < count; ++idx) { h_null_mask = (h_null_mask << 1); const char* str = h_test_strings[idx]; if (str) { cudf::size_type length = (cudf::size_type)strlen(str); memcpy(h_buffer.data() + offset, str, length); offset += length; h_null_mask |= 1; } else null_count++; h_offsets[idx + 1] = offset; } std::vector<cudf::bitmask_type> h_nulls{h_null_mask}; rmm::device_vector<char> d_buffer(h_buffer); rmm::device_vector<cudf::size_type> d_offsets(h_offsets); rmm::device_vector<cudf::bitmask_type> d_nulls(h_nulls); auto column = cudf::make_strings_column(d_buffer, d_offsets, d_nulls, null_count); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING}); EXPECT_EQ(column->null_count(), null_count); EXPECT_EQ(2, column->num_children()); cudf::strings_column_view strings_view(column->view()); EXPECT_EQ(strings_view.size(), count); EXPECT_EQ(strings_view.offsets().size(), count + 1); EXPECT_EQ(strings_view.chars().size(), memsize); // check string data auto strings_data = cudf::strings::create_offsets(strings_view); thrust::host_vector<char> h_chars_data(strings_data.first); thrust::host_vector<cudf::size_type> h_offsets_data(strings_data.second); EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0); EXPECT_EQ( memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0); // check host version of the factory too auto column2 = cudf::make_strings_column(h_buffer, h_offsets, h_nulls, null_count); CUDF_TEST_EXPECT_COLUMNS_EQUAL(column->view(), column2->view()); } TEST_F(StringsFactoriesTest, CreateScalar) { std::string value = "test string"; auto s = cudf::make_string_scalar(value); auto string_s = static_cast<cudf::string_scalar*>(s.get()); EXPECT_EQ(string_s->to_string(), value); EXPECT_TRUE(string_s->is_valid()); EXPECT_TRUE(s->is_valid()); } TEST_F(StringsFactoriesTest, EmptyStringsColumn) { rmm::device_vector<char> d_chars; rmm::device_vector<cudf::size_type> d_offsets(1, 0); rmm::device_vector<cudf::bitmask_type> d_nulls; auto results = cudf::make_strings_column(d_chars, d_offsets, d_nulls, 0); cudf::test::expect_strings_empty(results->view()); rmm::device_vector<thrust::pair<const char*, cudf::size_type>> d_strings; results = cudf::make_strings_column(d_strings); cudf::test::expect_strings_empty(results->view()); } TEST_F(StringsFactoriesTest, CreateOffsets) { std::vector<std::string> strings = {"this", "is", "a", "column", "of", "strings"}; cudf::test::strings_column_wrapper sw = {strings.begin(), strings.end()}; cudf::column_view col(sw); std::vector<cudf::size_type> indices{0, 2, 3, 6}; auto result = cudf::slice(col, indices); std::vector<std::vector<std::string>> expecteds{ 
std::vector<std::string>{"this", "is"}, // [0,2) std::vector<std::string>{"column", "of", "strings"} // [3,6) }; for (size_t idx = 0; idx < result.size(); idx++) { auto strings_data = cudf::strings::create_offsets(cudf::strings_column_view(result[idx])); thrust::host_vector<char> h_chars(strings_data.first); thrust::host_vector<cudf::size_type> h_offsets(strings_data.second); auto expected_strings = expecteds[idx]; for (size_t jdx = 0; jdx < h_offsets.size() - 1; ++jdx) { auto offset = h_offsets[jdx]; auto length = h_offsets[jdx + 1] - offset; std::string str(h_chars.data() + offset, length); EXPECT_EQ(str, expected_strings[jdx]); } } }
c4f1af6c01bca9a03640e90e106ef2e2b41c78ac.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/strings/utilities.h> #include <cudf/column/column_factories.hpp> #include <cudf/copying.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cstring> #include <vector> struct StringsFactoriesTest : public cudf::test::BaseFixture { }; TEST_F(StringsFactoriesTest, CreateColumnFromPair) { std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog", "the fat cat lays next to the other accénted cat", "a slow moving turtlé cannot catch the bird", "which can be composéd together to form a more complete", "thé result does not include the value in the sum in", "", nullptr, "absent stop words"}; cudf::size_type memsize = 0; for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr) memsize += *itr ? (cudf::size_type)strlen(*itr) : 0; cudf::size_type count = (cudf::size_type)h_test_strings.size(); thrust::host_vector<char> h_buffer(memsize); thrust::device_vector<char> d_buffer(memsize); thrust::host_vector<thrust::pair<const char*, cudf::size_type>> strings(count); thrust::host_vector<cudf::size_type> h_offsets(count + 1); cudf::size_type offset = 0; cudf::size_type nulls = 0; h_offsets[0] = 0; for (cudf::size_type idx = 0; idx < count; ++idx) { const char* str = h_test_strings[idx]; if (!str) { strings[idx] = thrust::pair<const char*, cudf::size_type>{nullptr, 0}; nulls++; } else { cudf::size_type length = (cudf::size_type)strlen(str); memcpy(h_buffer.data() + offset, str, length); strings[idx] = thrust::pair<const char*, cudf::size_type>{d_buffer.data().get() + offset, length}; offset += length; } h_offsets[idx + 1] = offset; } rmm::device_vector<thrust::pair<const char*, cudf::size_type>> d_strings(strings); CUDA_TRY(cudaMemcpy(d_buffer.data().get(), h_buffer.data(), memsize, cudaMemcpyHostToDevice)); auto column = cudf::make_strings_column(d_strings); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING}); EXPECT_EQ(column->null_count(), nulls); if (nulls) { EXPECT_TRUE(column->nullable()); EXPECT_TRUE(column->has_nulls()); } EXPECT_EQ(2, column->num_children()); cudf::strings_column_view strings_view(column->view()); EXPECT_EQ(strings_view.size(), count); EXPECT_EQ(strings_view.offsets().size(), count + 1); EXPECT_EQ(strings_view.chars().size(), memsize); // check string data auto strings_data = cudf::strings::create_offsets(strings_view); thrust::host_vector<char> h_chars_data(strings_data.first); thrust::host_vector<cudf::size_type> h_offsets_data(strings_data.second); EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0); EXPECT_EQ( memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0); } TEST_F(StringsFactoriesTest, CreateColumnFromOffsets) { 
std::vector<const char*> h_test_strings{"the quick brown fox jumps over the lazy dog", "the fat cat lays next to the other accénted cat", "a slow moving turtlé cannot catch the bird", "which can be composéd together to form a more complete", "thé result does not include the value in the sum in", "", nullptr, "absent stop words"}; cudf::size_type memsize = 0; for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr) memsize += *itr ? (cudf::size_type)strlen(*itr) : 0; cudf::size_type count = (cudf::size_type)h_test_strings.size(); std::vector<char> h_buffer(memsize); std::vector<cudf::size_type> h_offsets(count + 1); cudf::size_type offset = 0; h_offsets[0] = offset; cudf::bitmask_type h_null_mask = 0; cudf::size_type null_count = 0; for (cudf::size_type idx = 0; idx < count; ++idx) { h_null_mask = (h_null_mask << 1); const char* str = h_test_strings[idx]; if (str) { cudf::size_type length = (cudf::size_type)strlen(str); memcpy(h_buffer.data() + offset, str, length); offset += length; h_null_mask |= 1; } else null_count++; h_offsets[idx + 1] = offset; } std::vector<cudf::bitmask_type> h_nulls{h_null_mask}; rmm::device_vector<char> d_buffer(h_buffer); rmm::device_vector<cudf::size_type> d_offsets(h_offsets); rmm::device_vector<cudf::bitmask_type> d_nulls(h_nulls); auto column = cudf::make_strings_column(d_buffer, d_offsets, d_nulls, null_count); EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING}); EXPECT_EQ(column->null_count(), null_count); EXPECT_EQ(2, column->num_children()); cudf::strings_column_view strings_view(column->view()); EXPECT_EQ(strings_view.size(), count); EXPECT_EQ(strings_view.offsets().size(), count + 1); EXPECT_EQ(strings_view.chars().size(), memsize); // check string data auto strings_data = cudf::strings::create_offsets(strings_view); thrust::host_vector<char> h_chars_data(strings_data.first); thrust::host_vector<cudf::size_type> h_offsets_data(strings_data.second); EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0); EXPECT_EQ( memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0); // check host version of the factory too auto column2 = cudf::make_strings_column(h_buffer, h_offsets, h_nulls, null_count); CUDF_TEST_EXPECT_COLUMNS_EQUAL(column->view(), column2->view()); } TEST_F(StringsFactoriesTest, CreateScalar) { std::string value = "test string"; auto s = cudf::make_string_scalar(value); auto string_s = static_cast<cudf::string_scalar*>(s.get()); EXPECT_EQ(string_s->to_string(), value); EXPECT_TRUE(string_s->is_valid()); EXPECT_TRUE(s->is_valid()); } TEST_F(StringsFactoriesTest, EmptyStringsColumn) { rmm::device_vector<char> d_chars; rmm::device_vector<cudf::size_type> d_offsets(1, 0); rmm::device_vector<cudf::bitmask_type> d_nulls; auto results = cudf::make_strings_column(d_chars, d_offsets, d_nulls, 0); cudf::test::expect_strings_empty(results->view()); rmm::device_vector<thrust::pair<const char*, cudf::size_type>> d_strings; results = cudf::make_strings_column(d_strings); cudf::test::expect_strings_empty(results->view()); } TEST_F(StringsFactoriesTest, CreateOffsets) { std::vector<std::string> strings = {"this", "is", "a", "column", "of", "strings"}; cudf::test::strings_column_wrapper sw = {strings.begin(), strings.end()}; cudf::column_view col(sw); std::vector<cudf::size_type> indices{0, 2, 3, 6}; auto result = cudf::slice(col, indices); std::vector<std::vector<std::string>> expecteds{ std::vector<std::string>{"this", "is"}, // [0,2) 
std::vector<std::string>{"column", "of", "strings"} // [3,6) }; for (size_t idx = 0; idx < result.size(); idx++) { auto strings_data = cudf::strings::create_offsets(cudf::strings_column_view(result[idx])); thrust::host_vector<char> h_chars(strings_data.first); thrust::host_vector<cudf::size_type> h_offsets(strings_data.second); auto expected_strings = expecteds[idx]; for (size_t jdx = 0; jdx < h_offsets.size() - 1; ++jdx) { auto offset = h_offsets[jdx]; auto length = h_offsets[jdx + 1] - offset; std::string str(h_chars.data() + offset, length); EXPECT_EQ(str, expected_strings[jdx]); } } }
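// ---- Illustrative sketch only; not taken from the cudf test file pair above ----
// Those factory tests encode a strings column as one contiguous chars buffer plus an
// offsets array of count + 1 entries, where string idx occupies [offsets[idx], offsets[idx+1]).
// The host-only helper below decodes that layout the same way the CreateOffsets test does;
// decode_strings_sketch is a made-up name for illustration, not a cudf API.
#include <string>
#include <vector>
static std::vector<std::string> decode_strings_sketch(const std::vector<char>& chars,
                                                      const std::vector<int>& offsets) {
    std::vector<std::string> out;
    for (size_t i = 0; i + 1 < offsets.size(); ++i) {
        int start  = offsets[i];
        int length = offsets[i + 1] - start;   // length of string i
        out.emplace_back(chars.data() + start, length);
    }
    return out;
}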
d7790efc52c7bd1f1f7c96b89ddfd82a083766ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/softmax_cross_entropy_kernel.h" #include "oneflow/core/kernel/kernel_util.cuh" namespace oneflow { namespace user_op { namespace { template<typename T> __global__ void ComputeEntropyGpu(const int64_t num_instances, const int64_t num_classes, const T* x, const T* labels, T* y) { CUDA_1D_KERNEL_LOOP(i, num_instances * num_classes) { const int32_t row_id = i / num_classes; T label = labels[i]; T prob = x[i]; gpu_atomic_add(y + row_id, -label * SafeLog(prob)); } } __global__ void ComputeEntropyGpuHalf(const int64_t num_instances, const int64_t num_classes, const half* x, const half* labels, half* y) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && TORCH_HIP_VERSION >= 10000 CUDA_1D_KERNEL_LOOP(i, num_instances * num_classes) { const int32_t row_id = i / num_classes; half label = labels[i]; half prob = x[i]; gpu_atomic_add(y + row_id, __hneg(__hmul(label, SafeLog<half>(prob)))); } #else printf("use half softmax cross entropy need nvcc arch >= 700 and cuda >= 10.0"); assert(false); #endif /* defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && TORCH_HIP_VERSION >= 10000 */ } template<typename T> __global__ void ComputeDiffWithSoftmaxGpu(const int64_t elem_cnt, const int64_t num_classes, const T* prob, const T* labels, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const int32_t row_id = i / num_classes; dx[i] = dy[row_id] * (prob[i] - labels[i]); } } __global__ void ComputeDiffWithSoftmaxGpuHalf(const int64_t elem_cnt, const int64_t num_classes, const half* prob, const half* labels, const half* dy, half* dx) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const int32_t row_id = i / num_classes; dx[i] = __hmul(dy[row_id], __hsub(prob[i], labels[i])); } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } } // namespace template<typename T> struct CrossEntropyKernelUtil<DeviceType::kGPU, T> { static void ComputeEntropy(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const T* x, const T* labels, T* y) { hipMemset(y, 0, sizeof(T) * num_instances); hipLaunchKernelGGL(( ComputeEntropyGpu), dim3(BlocksNum4ThreadsNum(num_instances)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), num_instances, num_classes, x, labels, y); } static void ComputeDiffWithSoftmax(DeviceCtx* ctx, const int64_t elem_cnt, const int64_t num_classes, const T* prob, const T* labels, const T* dy, T* dx) { hipLaunchKernelGGL(( ComputeDiffWithSoftmaxGpu), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), elem_cnt, num_classes, prob, labels, dy, dx); } }; template<> struct CrossEntropyKernelUtil<DeviceType::kGPU, float16> { static void ComputeEntropy(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const float16* x, const 
float16* labels, float16* y) { hipMemset(y, 0, sizeof(float16) * num_instances); hipLaunchKernelGGL(( ComputeEntropyGpuHalf), dim3(BlocksNum4ThreadsNum(num_instances)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), num_instances, num_classes, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(labels), reinterpret_cast<half*>(y)); } static void ComputeDiffWithSoftmax(DeviceCtx* ctx, const int64_t elem_cnt, const int64_t num_classes, const float16* prob, const float16* labels, const float16* dy, float16* dx) { hipLaunchKernelGGL(( ComputeDiffWithSoftmaxGpuHalf), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), elem_cnt, num_classes, reinterpret_cast<const half*>(prob), reinterpret_cast<const half*>(labels), reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SOFTMAX_CROSS_ENTROPY_KERNEL, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), FLOATING_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ) OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SOFTMAX_CROSS_ENTROPY_GRAD_KERNEL, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), FLOATING_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ) } // namespace user_op } // namespace oneflow
d7790efc52c7bd1f1f7c96b89ddfd82a083766ae.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/softmax_cross_entropy_kernel.h" #include "oneflow/core/kernel/kernel_util.cuh" namespace oneflow { namespace user_op { namespace { template<typename T> __global__ void ComputeEntropyGpu(const int64_t num_instances, const int64_t num_classes, const T* x, const T* labels, T* y) { CUDA_1D_KERNEL_LOOP(i, num_instances * num_classes) { const int32_t row_id = i / num_classes; T label = labels[i]; T prob = x[i]; gpu_atomic_add(y + row_id, -label * SafeLog(prob)); } } __global__ void ComputeEntropyGpuHalf(const int64_t num_instances, const int64_t num_classes, const half* x, const half* labels, half* y) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && CUDA_VERSION >= 10000 CUDA_1D_KERNEL_LOOP(i, num_instances * num_classes) { const int32_t row_id = i / num_classes; half label = labels[i]; half prob = x[i]; gpu_atomic_add(y + row_id, __hneg(__hmul(label, SafeLog<half>(prob)))); } #else printf("use half softmax cross entropy need nvcc arch >= 700 and cuda >= 10.0"); assert(false); #endif /* defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && CUDA_VERSION >= 10000 */ } template<typename T> __global__ void ComputeDiffWithSoftmaxGpu(const int64_t elem_cnt, const int64_t num_classes, const T* prob, const T* labels, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const int32_t row_id = i / num_classes; dx[i] = dy[row_id] * (prob[i] - labels[i]); } } __global__ void ComputeDiffWithSoftmaxGpuHalf(const int64_t elem_cnt, const int64_t num_classes, const half* prob, const half* labels, const half* dy, half* dx) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const int32_t row_id = i / num_classes; dx[i] = __hmul(dy[row_id], __hsub(prob[i], labels[i])); } #else printf("use half need nvcc arch >= 530"); assert(false); #endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/ } } // namespace template<typename T> struct CrossEntropyKernelUtil<DeviceType::kGPU, T> { static void ComputeEntropy(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const T* x, const T* labels, T* y) { cudaMemset(y, 0, sizeof(T) * num_instances); ComputeEntropyGpu<<<BlocksNum4ThreadsNum(num_instances), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(num_instances, num_classes, x, labels, y); } static void ComputeDiffWithSoftmax(DeviceCtx* ctx, const int64_t elem_cnt, const int64_t num_classes, const T* prob, const T* labels, const T* dy, T* dx) { ComputeDiffWithSoftmaxGpu<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(elem_cnt, num_classes, prob, labels, dy, dx); } }; template<> struct CrossEntropyKernelUtil<DeviceType::kGPU, float16> { static void ComputeEntropy(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const float16* x, const float16* labels, float16* y) { cudaMemset(y, 0, sizeof(float16) * num_instances); ComputeEntropyGpuHalf<<<BlocksNum4ThreadsNum(num_instances), 
kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( num_instances, num_classes, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(labels), reinterpret_cast<half*>(y)); } static void ComputeDiffWithSoftmax(DeviceCtx* ctx, const int64_t elem_cnt, const int64_t num_classes, const float16* prob, const float16* labels, const float16* dy, float16* dx) { ComputeDiffWithSoftmaxGpuHalf<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, num_classes, reinterpret_cast<const half*>(prob), reinterpret_cast<const half*>(labels), reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SOFTMAX_CROSS_ENTROPY_KERNEL, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), FLOATING_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ) OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SOFTMAX_CROSS_ENTROPY_GRAD_KERNEL, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kGPU), FLOATING_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ) } // namespace user_op } // namespace oneflow
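// ---- Illustrative sketch only; not taken from the softmax-cross-entropy file pair above ----
// Host-side reference of what the two kernels above compute per row: the forward kernel
// accumulates -label * log(prob) over the row's classes (after zeroing y), and the
// backward kernel scales (prob - label) by that row's upstream gradient dy. Plain C++
// for clarity; the small eps stands in for the file's SafeLog guard and is an assumption.
#include <cmath>
static void softmax_xent_reference(const float* prob, const float* labels, const float* dy,
                                   float* y, float* dx, int num_instances, int num_classes) {
    const float eps = 1e-20f;
    for (int row = 0; row < num_instances; ++row) {
        float loss = 0.f;
        for (int c = 0; c < num_classes; ++c) {
            int i = row * num_classes + c;
            loss += -labels[i] * std::log(prob[i] + eps);
            dx[i] = dy[row] * (prob[i] - labels[i]);   // same expression as ComputeDiffWithSoftmaxGpu
        }
        y[row] = loss;
    }
}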
d7e507c83235ead27cb83f6f95629c8f80393a61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void divergenceL(float *v, float *d, int nx, int ny) { int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; /* float AX = 0; if ((idx < N) && (px<(nx-1))) AX += v[2*(idx )+0]; if ((idx < N) && (px>0)) AX -= v[2*(idx-1 )+0]; if ((idx < N) && (py<(ny-1))) AX += v[2*(idx )+1]; if ((idx < N) && (py>0)) AX -= v[2*(idx-nx)+1]; if (idx < N) d[idx] = AX; */ if(px<nx && py<ny) { float AX = 0; if((px<(nx - 1))) AX += v[2 * (idx)+0]; if((px>0)) AX -= v[2 * (idx - 1) + 0]; if((py<(ny - 1))) AX += v[2 * (idx)+1]; if((py>0)) AX -= v[2 * (idx - nx) + 1]; d[idx] = AX; } }
d7e507c83235ead27cb83f6f95629c8f80393a61.cu
#include "includes.h" __global__ void divergenceL(float *v, float *d, int nx, int ny) { int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; /* float AX = 0; if ((idx < N) && (px<(nx-1))) AX += v[2*(idx )+0]; if ((idx < N) && (px>0)) AX -= v[2*(idx-1 )+0]; if ((idx < N) && (py<(ny-1))) AX += v[2*(idx )+1]; if ((idx < N) && (py>0)) AX -= v[2*(idx-nx)+1]; if (idx < N) d[idx] = AX; */ if(px<nx && py<ny) { float AX = 0; if((px<(nx - 1))) AX += v[2 * (idx)+0]; if((px>0)) AX -= v[2 * (idx - 1) + 0]; if((py<(ny - 1))) AX += v[2 * (idx)+1]; if((py>0)) AX -= v[2 * (idx - nx) + 1]; d[idx] = AX; } }
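// ---- Illustrative sketch only; not taken from the divergenceL file pair above ----
// Host reference of the kernel above: v stores an interleaved two-component field
// (v[2*idx+0] is the x component, v[2*idx+1] the y component) on an nx-by-ny grid, and
// each output is v[idx] - v[idx-1] in x plus v[idx] - v[idx-nx] in y, with individual
// terms dropped at the grid edges exactly as the kernel's guards do.
static void divergence_reference(const float* v, float* d, int nx, int ny) {
    for (int py = 0; py < ny; ++py) {
        for (int px = 0; px < nx; ++px) {
            int idx = px + py * nx;
            float AX = 0.f;
            if (px < nx - 1) AX += v[2 * idx + 0];
            if (px > 0)      AX -= v[2 * (idx - 1) + 0];
            if (py < ny - 1) AX += v[2 * idx + 1];
            if (py > 0)      AX -= v[2 * (idx - nx) + 1];
            d[idx] = AX;
        }
    }
}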
5fb529b5d7810a6a5d674ddd4de9a16175b9aa98.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> #include "xor_wow_data.hpp" #include "RNGState.h" struct RNG { unsigned* d_sequence_matrix; unsigned* d_offset_matrix; __device__ inline void state_init(unsigned long long seed, unsigned long long subsequence, unsigned long long offset, RNGState& state) { unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL; unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL; unsigned int t0 = 1099087573UL * s0; unsigned int t1 = 2591861531UL * s1; state.d = 6615241 + t1 + t0; state.v.v0 = 123456789UL + t0; state.v.v1 = 362436069UL ^ t0; state.v.v2 = 521288629UL + t1; state.v.v3 = 88675123UL ^ t1; state.v.v4 = 5783321UL + t0; // apply sequence matrix V5 result; unsigned long long p = subsequence; int i_mat = 0; unsigned matrix[800]; unsigned matrixA[800]; while (p && i_mat < 7) { for (unsigned int t = 0; t < (p & 3); t++) { matvec(state.v, d_sequence_matrix + i_mat * 800, result); state.v = result; } p >>= 2; i_mat++; } if (p) { memcpy(matrix, d_sequence_matrix + i_mat * 800, sizeof(unsigned) * 800); memcpy(matrixA, d_sequence_matrix + i_mat * 800, sizeof(unsigned) * 800); } while (p) { for (unsigned int t = 0; t < (p & 0xF); t++) { matvec(state.v, matrixA, result); state.v = result; } p >>= 4; if (p) { for (int i = 0; i < 4; i++) { matmat(matrix, matrixA); memcpy(matrixA, matrix, sizeof(unsigned) * 800); } } } // apply offset matrix p = offset; i_mat = 0; while (p && i_mat < 7) { for (unsigned int t = 0; t < (p & 3); t++) { matvec(state.v, d_offset_matrix + i_mat * 800, result); state.v = result; } p >>= 2; i_mat++; } if (p) { memcpy(matrix, d_offset_matrix + i_mat * 800, sizeof(unsigned) * 800); memcpy(matrixA, d_offset_matrix + i_mat * 800, sizeof(unsigned) * 800); } while (p) { for (unsigned int t = 0; t < (p & 0xF); t++) { matvec(state.v, matrixA, result); state.v = result; } p >>= 4; if (p) { for (int i = 0; i < 4; i++) { matmat(matrix, matrixA); memcpy(matrixA, matrix, sizeof(unsigned) * 800); } } } state.d += 362437 * (unsigned int)offset; } private: static __device__ inline void matvec_i(int i, unsigned v_i, const unsigned *matrix, V5& result) { for (int j = 0; j < 32; j++) if (v_i & (1 << j)) { V5 mat_row = ((V5*)matrix)[i * 32 + j]; result.v0 ^= mat_row.v0; result.v1 ^= mat_row.v1; result.v2 ^= mat_row.v2; result.v3 ^= mat_row.v3; result.v4 ^= mat_row.v4; } } static __device__ inline void matvec(const V5& vector, const unsigned *matrix, V5& result) { memset(&result, 0, sizeof(V5)); matvec_i(0, vector.v0, matrix, result); matvec_i(1, vector.v1, matrix, result); matvec_i(2, vector.v2, matrix, result); matvec_i(3, vector.v3, matrix, result); matvec_i(4, vector.v4, matrix, result); } static __device__ inline void matmat(unsigned int *matrixA, const unsigned int *matrixB) { V5 result; for (int i = 0; i < 160; i++) { matvec(((V5*)matrixA)[i], matrixB, result); ((V5*)matrixA)[i] = result; } } }; __global__ void g_rand_init(RNG rng, RNGState* d_states, unsigned count) { unsigned id = threadIdx.x + blockIdx.x*blockDim.x; if (id >= count) return; rng.state_init(1234, id, 0, d_states[id]); } void cu_rand_init(unsigned count, RNGState* d_states) { RNG rng; hipMalloc(&rng.d_sequence_matrix, sizeof(unsigned) * 800 * 8); hipMalloc(&rng.d_offset_matrix, sizeof(unsigned) * 800 * 8); hipMemcpy(rng.d_sequence_matrix, xorwow_sequence_matrix, sizeof(unsigned) * 800 * 8, hipMemcpyHostToDevice); hipMemcpy(rng.d_offset_matrix, xorwow_offset_matrix, sizeof(unsigned) 
* 800 * 8, hipMemcpyHostToDevice); unsigned blocks = (count + 127) / 128; g_rand_init << < blocks, 128 >> > (rng, d_states, count); hipFree(rng.d_offset_matrix); hipFree(rng.d_sequence_matrix); } void h_rand_init(unsigned count, RNGState* h_states) { RNGState* d_states; hipMalloc(&d_states, sizeof(RNGState)* count); cu_rand_init(count, d_states); hipMemcpy(h_states, d_states, sizeof(RNGState)* count, hipMemcpyDeviceToHost); hipFree(d_states); }
5fb529b5d7810a6a5d674ddd4de9a16175b9aa98.cu
#include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> #include "xor_wow_data.hpp" #include "RNGState.h" struct RNG { unsigned* d_sequence_matrix; unsigned* d_offset_matrix; __device__ inline void state_init(unsigned long long seed, unsigned long long subsequence, unsigned long long offset, RNGState& state) { unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL; unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL; unsigned int t0 = 1099087573UL * s0; unsigned int t1 = 2591861531UL * s1; state.d = 6615241 + t1 + t0; state.v.v0 = 123456789UL + t0; state.v.v1 = 362436069UL ^ t0; state.v.v2 = 521288629UL + t1; state.v.v3 = 88675123UL ^ t1; state.v.v4 = 5783321UL + t0; // apply sequence matrix V5 result; unsigned long long p = subsequence; int i_mat = 0; unsigned matrix[800]; unsigned matrixA[800]; while (p && i_mat < 7) { for (unsigned int t = 0; t < (p & 3); t++) { matvec(state.v, d_sequence_matrix + i_mat * 800, result); state.v = result; } p >>= 2; i_mat++; } if (p) { memcpy(matrix, d_sequence_matrix + i_mat * 800, sizeof(unsigned) * 800); memcpy(matrixA, d_sequence_matrix + i_mat * 800, sizeof(unsigned) * 800); } while (p) { for (unsigned int t = 0; t < (p & 0xF); t++) { matvec(state.v, matrixA, result); state.v = result; } p >>= 4; if (p) { for (int i = 0; i < 4; i++) { matmat(matrix, matrixA); memcpy(matrixA, matrix, sizeof(unsigned) * 800); } } } // apply offset matrix p = offset; i_mat = 0; while (p && i_mat < 7) { for (unsigned int t = 0; t < (p & 3); t++) { matvec(state.v, d_offset_matrix + i_mat * 800, result); state.v = result; } p >>= 2; i_mat++; } if (p) { memcpy(matrix, d_offset_matrix + i_mat * 800, sizeof(unsigned) * 800); memcpy(matrixA, d_offset_matrix + i_mat * 800, sizeof(unsigned) * 800); } while (p) { for (unsigned int t = 0; t < (p & 0xF); t++) { matvec(state.v, matrixA, result); state.v = result; } p >>= 4; if (p) { for (int i = 0; i < 4; i++) { matmat(matrix, matrixA); memcpy(matrixA, matrix, sizeof(unsigned) * 800); } } } state.d += 362437 * (unsigned int)offset; } private: static __device__ inline void matvec_i(int i, unsigned v_i, const unsigned *matrix, V5& result) { for (int j = 0; j < 32; j++) if (v_i & (1 << j)) { V5 mat_row = ((V5*)matrix)[i * 32 + j]; result.v0 ^= mat_row.v0; result.v1 ^= mat_row.v1; result.v2 ^= mat_row.v2; result.v3 ^= mat_row.v3; result.v4 ^= mat_row.v4; } } static __device__ inline void matvec(const V5& vector, const unsigned *matrix, V5& result) { memset(&result, 0, sizeof(V5)); matvec_i(0, vector.v0, matrix, result); matvec_i(1, vector.v1, matrix, result); matvec_i(2, vector.v2, matrix, result); matvec_i(3, vector.v3, matrix, result); matvec_i(4, vector.v4, matrix, result); } static __device__ inline void matmat(unsigned int *matrixA, const unsigned int *matrixB) { V5 result; for (int i = 0; i < 160; i++) { matvec(((V5*)matrixA)[i], matrixB, result); ((V5*)matrixA)[i] = result; } } }; __global__ void g_rand_init(RNG rng, RNGState* d_states, unsigned count) { unsigned id = threadIdx.x + blockIdx.x*blockDim.x; if (id >= count) return; rng.state_init(1234, id, 0, d_states[id]); } void cu_rand_init(unsigned count, RNGState* d_states) { RNG rng; cudaMalloc(&rng.d_sequence_matrix, sizeof(unsigned) * 800 * 8); cudaMalloc(&rng.d_offset_matrix, sizeof(unsigned) * 800 * 8); cudaMemcpy(rng.d_sequence_matrix, xorwow_sequence_matrix, sizeof(unsigned) * 800 * 8, cudaMemcpyHostToDevice); cudaMemcpy(rng.d_offset_matrix, xorwow_offset_matrix, sizeof(unsigned) * 800 * 8, cudaMemcpyHostToDevice); unsigned blocks = 
(count + 127) / 128; g_rand_init << < blocks, 128 >> > (rng, d_states, count); cudaFree(rng.d_offset_matrix); cudaFree(rng.d_sequence_matrix); } void h_rand_init(unsigned count, RNGState* h_states) { RNGState* d_states; cudaMalloc(&d_states, sizeof(RNGState)* count); cu_rand_init(count, d_states); cudaMemcpy(h_states, d_states, sizeof(RNGState)* count, cudaMemcpyDeviceToHost); cudaFree(d_states); }
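// ---- Illustrative sketch only; not taken from the XORWOW RNG file pair above ----
// Minimal host-side use of the initializer defined above: every one of `count` states is
// seeded with the fixed seed 1234 and its own subsequence id (the thread id), which is
// what the matrix-power skip-ahead in state_init implements. The v.v0 / d field names
// follow their use in this file; their exact types live in RNGState.h, so the casts
// below are a defensive assumption for printing only.
#include <cstdio>
static void rand_init_usage_sketch() {
    const unsigned count = 4;
    RNGState states[count];
    h_rand_init(count, states);   // runs g_rand_init on the GPU and copies the states back
    for (unsigned i = 0; i < count; ++i)
        printf("state %u: v0=%u d=%u\n", i,
               (unsigned)states[i].v.v0, (unsigned)states[i].d);
}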
80f96ec97b80ed07d48826c1fcdde1d6dffbfde5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated d Tue Dec 17 13:18:45 2013 @author Stan Tomov */ #include "common_magma.h" #define PRECISION_d #include "commonblas.h" __global__ void dtranspose3_32( double *B, int ldb, const double *A, int lda, int m, int m32, int n, int n32) { __shared__ double sA[32][DSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; A += ibx + inx + __mul24( iby + iny, lda ); B += iby + inx + __mul24( ibx + iny, ldb ); int t2 = iby+iny; if (ibx+inx < m) { if (t2 < n) { sA[iny+0][inx] = A[0*lda]; if (t2+ 8 < n) { sA[iny+8][inx] = A[8*lda]; if (t2 + 16 < n) { sA[iny+16][inx] = A[16*lda]; if (t2 + 24 < n) { sA[iny+24][inx] = A[24*lda]; } } } } } __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) if (iby + inx < n) { if (ibx + iny < m) { B[0*ldb] = sA[inx][iny+0]; if (ibx + iny + 8 < m) { B[8*ldb] = sA[inx][iny+8]; if (ibx + iny +16 < m) { B[16*ldb] = sA[inx][iny+16]; if (ibx + iny + 24 < m) { B[24*ldb] = sA[inx][iny+24]; } } } } } #else /* defined(PRECISION_z) */ if (iby + inx < n) { if (ibx + iny < m) { B[0*ldb] = sA[inx][iny+0]; if (ibx + iny + 8 < m) { B[8*ldb] = sA[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny < m) { B[0*ldb+16] = sA[inx+16][iny+0]; if (ibx + iny + 8 < m) { B[8*ldb+16] = sA[inx+16][iny+8]; } } } } __syncthreads(); A += DSIZE_1SHARED; B += __mul24( 16, ldb ); sA[iny+ 0][inx] = A[ 0*lda]; sA[iny+ 8][inx] = A[ 8*lda]; sA[iny+16][inx] = A[16*lda]; sA[iny+24][inx] = A[24*lda]; __syncthreads(); if (iby + inx < n) { if (ibx + iny + 16 < m) { B[0*ldb] = sA[inx][iny+0]; if (ibx + iny + 24 < m) { B[8*ldb] = sA[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny + 16 < m) { B[0*ldb+16] = sA[inx+16][iny+0]; if (ibx + iny + 24 < m) { B[8*ldb+16] = sA[inx+16][iny+8]; } } } } #endif } __global__ void dtranspose2_32( double *B, int ldb, const double *A, int lda, int m, int m32, int n, int n32) { __shared__ double sA[32][DSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; int dx, dy; if (ibx+32 < m) dx = 0; else dx = m32; if (iby+32 < n) dy = 0; else dy = n32; A += ibx + inx - dx + __mul24( iby + iny - dy, lda ); B += iby + inx - dy + __mul24( ibx + iny - dx, ldb ); sA[iny+0][inx] = A[0*lda]; sA[iny+8][inx] = A[8*lda]; sA[iny+16][inx] = A[16*lda]; sA[iny+24][inx] = A[24*lda]; __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) B[ 0*ldb] = sA[inx][iny+0]; B[ 8*ldb] = sA[inx][iny+8]; B[16*ldb] = sA[inx][iny+16]; B[24*ldb] = sA[inx][iny+24]; #else /* defined(PRECISION_z) */ B[0*ldb] = sA[inx][iny+0]; B[8*ldb] = sA[inx][iny+8]; B[0*ldb+16] = sA[inx+16][iny+0]; B[8*ldb+16] = sA[inx+16][iny+8]; __syncthreads(); A += DSIZE_1SHARED; B += __mul24( 16, ldb ); sA[iny+ 0][inx] = A[ 0*lda]; sA[iny+ 8][inx] = A[ 8*lda]; sA[iny+16][inx] = A[16*lda]; sA[iny+24][inx] = A[24*lda]; __syncthreads(); B[0*ldb] = sA[inx ][iny+0]; B[8*ldb] = sA[inx ][iny+8]; B[0*ldb+16] = sA[inx+16][iny+0]; B[8*ldb+16] = sA[inx+16][iny+8]; #endif } // // m, n - dimensions in the source (input) matrix // This version transposes for general m, n . // Note that ldi >= m and ldo >= n. 
// extern "C" void magmablas_dtranspose2(double *odata, magma_int_t ldo, const double *idata, magma_int_t ldi, magma_int_t m, magma_int_t n ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( DSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); hipLaunchKernelGGL(( dtranspose3_32), dim3(grid), dim3(threads), 0, magma_stream , odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 ); } extern "C" void magmablas_dtranspose2s(double *odata, magma_int_t ldo, const double *idata, magma_int_t ldi, magma_int_t m, magma_int_t n, magma_queue_t stream ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( DSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); hipLaunchKernelGGL(( dtranspose3_32), dim3(grid), dim3(threads), 0, stream , odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 ); }
80f96ec97b80ed07d48826c1fcdde1d6dffbfde5.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated d Tue Dec 17 13:18:45 2013 @author Stan Tomov */ #include "common_magma.h" #define PRECISION_d #include "commonblas.h" __global__ void dtranspose3_32( double *B, int ldb, const double *A, int lda, int m, int m32, int n, int n32) { __shared__ double sA[32][DSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; A += ibx + inx + __mul24( iby + iny, lda ); B += iby + inx + __mul24( ibx + iny, ldb ); int t2 = iby+iny; if (ibx+inx < m) { if (t2 < n) { sA[iny+0][inx] = A[0*lda]; if (t2+ 8 < n) { sA[iny+8][inx] = A[8*lda]; if (t2 + 16 < n) { sA[iny+16][inx] = A[16*lda]; if (t2 + 24 < n) { sA[iny+24][inx] = A[24*lda]; } } } } } __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) if (iby + inx < n) { if (ibx + iny < m) { B[0*ldb] = sA[inx][iny+0]; if (ibx + iny + 8 < m) { B[8*ldb] = sA[inx][iny+8]; if (ibx + iny +16 < m) { B[16*ldb] = sA[inx][iny+16]; if (ibx + iny + 24 < m) { B[24*ldb] = sA[inx][iny+24]; } } } } } #else /* defined(PRECISION_z) */ if (iby + inx < n) { if (ibx + iny < m) { B[0*ldb] = sA[inx][iny+0]; if (ibx + iny + 8 < m) { B[8*ldb] = sA[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny < m) { B[0*ldb+16] = sA[inx+16][iny+0]; if (ibx + iny + 8 < m) { B[8*ldb+16] = sA[inx+16][iny+8]; } } } } __syncthreads(); A += DSIZE_1SHARED; B += __mul24( 16, ldb ); sA[iny+ 0][inx] = A[ 0*lda]; sA[iny+ 8][inx] = A[ 8*lda]; sA[iny+16][inx] = A[16*lda]; sA[iny+24][inx] = A[24*lda]; __syncthreads(); if (iby + inx < n) { if (ibx + iny + 16 < m) { B[0*ldb] = sA[inx][iny+0]; if (ibx + iny + 24 < m) { B[8*ldb] = sA[inx][iny+8]; } } if (iby + inx + 16 < n) { if (ibx + iny + 16 < m) { B[0*ldb+16] = sA[inx+16][iny+0]; if (ibx + iny + 24 < m) { B[8*ldb+16] = sA[inx+16][iny+8]; } } } } #endif } __global__ void dtranspose2_32( double *B, int ldb, const double *A, int lda, int m, int m32, int n, int n32) { __shared__ double sA[32][DSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; int dx, dy; if (ibx+32 < m) dx = 0; else dx = m32; if (iby+32 < n) dy = 0; else dy = n32; A += ibx + inx - dx + __mul24( iby + iny - dy, lda ); B += iby + inx - dy + __mul24( ibx + iny - dx, ldb ); sA[iny+0][inx] = A[0*lda]; sA[iny+8][inx] = A[8*lda]; sA[iny+16][inx] = A[16*lda]; sA[iny+24][inx] = A[24*lda]; __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) B[ 0*ldb] = sA[inx][iny+0]; B[ 8*ldb] = sA[inx][iny+8]; B[16*ldb] = sA[inx][iny+16]; B[24*ldb] = sA[inx][iny+24]; #else /* defined(PRECISION_z) */ B[0*ldb] = sA[inx][iny+0]; B[8*ldb] = sA[inx][iny+8]; B[0*ldb+16] = sA[inx+16][iny+0]; B[8*ldb+16] = sA[inx+16][iny+8]; __syncthreads(); A += DSIZE_1SHARED; B += __mul24( 16, ldb ); sA[iny+ 0][inx] = A[ 0*lda]; sA[iny+ 8][inx] = A[ 8*lda]; sA[iny+16][inx] = A[16*lda]; sA[iny+24][inx] = A[24*lda]; __syncthreads(); B[0*ldb] = sA[inx ][iny+0]; B[8*ldb] = sA[inx ][iny+8]; B[0*ldb+16] = sA[inx+16][iny+0]; B[8*ldb+16] = sA[inx+16][iny+8]; #endif } // // m, n - dimensions in the source (input) matrix // This version transposes for general m, n . // Note that ldi >= m and ldo >= n. 
// extern "C" void magmablas_dtranspose2(double *odata, magma_int_t ldo, const double *idata, magma_int_t ldi, magma_int_t m, magma_int_t n ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( DSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); dtranspose3_32<<< grid, threads, 0, magma_stream >>>( odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 ); } extern "C" void magmablas_dtranspose2s(double *odata, magma_int_t ldo, const double *idata, magma_int_t ldi, magma_int_t m, magma_int_t n, magma_queue_t stream ) { /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( DSIZE_1SHARED, 8, 1 ); dim3 grid( (m+31)/32, (n+31)/32, 1 ); dtranspose3_32<<< grid, threads, 0, stream >>>( odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 ); }
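// ---- Illustrative sketch only; not taken from the MAGMA transpose file pair above ----
// Minimal call of the wrapper above: the input is an m-by-n device matrix with leading
// dimension ldi >= m and the output receives the n-by-m transpose with ldo >= n, as the
// comment before magmablas_dtranspose2 states. The tight ldi = m, ldo = n choice and the
// omission of error checks and of filling d_in are simplifications for illustration.
static void dtranspose2_usage_sketch(magma_int_t m, magma_int_t n) {
    double *d_in = NULL, *d_out = NULL;
    cudaMalloc((void**)&d_in,  sizeof(double) * m * n);   // m-by-n input, ldi = m
    cudaMalloc((void**)&d_out, sizeof(double) * n * m);   // n-by-m output, ldo = n
    magmablas_dtranspose2(d_out, n, d_in, m, m, n);       // queues the transpose on magma_stream
    cudaFree(d_in);
    cudaFree(d_out);
}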
dca6039f7b32e56668142caf0f77b8a3a30a682e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "non_deterministic_ints_impl.cuh" template<typename T> __global__ void NonDeterministicIntsKernel(int seed, hiprandStatePhilox4_32_10_t *globalState, T *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 4); i += blockDim.x * gridDim.x) { hiprand_init(seed, i, 0, &globalState[i]); uint4 i4 = hiprand4(&globalState[i]); output[i * 4] = i4.x; output[i * 4 + 1] = i4.y; output[i * 4 + 2] = i4.z; output[i * 4 + 3] = i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { hiprand_init(seed, 0, 0, &globalState[0]); uint4 i4 = hiprand4(&globalState[0]); size_t val = count % 4; for (size_t i = 0; i < val; i++) { output[count-i-1] = (&i4.x)[i]; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, hiprandStatePhilox4_32_10_t *globalState, int32_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 4); i += blockDim.x * gridDim.x) { hiprand_init(seed, i, 0, &globalState[i]); uint4 i4 = hiprand4(&globalState[i]); output[i * 4] = i4.x; output[i * 4 + 1] = i4.y; output[i * 4 + 2] = i4.z; output[i * 4 + 3] = i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { hiprand_init(seed, 0, 0, &globalState[0]); uint4 i4 = hiprand4(&globalState[0]); size_t val = count % 4; for (size_t i = 0; i < val; i++) { output[count-i-1] = (&i4.x)[i]; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, hiprandStatePhilox4_32_10_t *globalState, int64_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 2); i += blockDim.x * gridDim.x) { hiprand_init(seed, i, 0, &globalState[i]); uint4 i4 = hiprand4(&globalState[i]); output[i * 2] = ((int64_t)i4.x << 32) | i4.y; output[i * 2 + 1] = ((int64_t)i4.z << 32) | i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { hiprand_init(seed, 0, 0, &globalState[0]); uint4 i4 = hiprand4(&globalState[0]); if (count & 1) { output[count-1] = ((int64_t)i4.x << 32) | i4.y; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, hiprandStatePhilox4_32_10_t *globalState, uint32_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 4); i += blockDim.x * gridDim.x) { hiprand_init(seed, i, 0, &globalState[i]); uint4 i4 = hiprand4(&globalState[i]); output[i * 4] = i4.x; output[i * 4 + 1] = i4.y; output[i * 4 + 2] = i4.z; output[i * 4 + 3] = i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { hiprand_init(seed, 0, 0, &globalState[0]); uint4 i4 = hiprand4(&globalState[0]); size_t val = count % 4; for (size_t i = 0; i < val; i++) { output[count-i-1] = (&i4.x)[i]; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, hiprandStatePhilox4_32_10_t *globalState, uint64_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 
2); i += blockDim.x * gridDim.x) { hiprand_init(seed, i, 0, &globalState[i]); uint4 i4 = hiprand4(&globalState[i]); output[i * 2] = ((int64_t)i4.x << 32) | i4.y; output[i * 2 + 1] = ((int64_t)i4.z << 32) | i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { hiprand_init(seed, 0, 0, &globalState[0]); uint4 i4 = hiprand4(&globalState[0]); if (count & 1) { output[count-1] = ((int64_t)i4.x << 32) | i4.y; } } return; } template<typename T> void LaunchNonDeterministicInts(hiprandStatePhilox4_32_10_t *globalState, T *output, size_t count, const uint32_t &device_id, hipStream_t cuda_stream) { std::random_device rd; int seed = static_cast<int>(rd()); hipLaunchKernelGGL(( NonDeterministicIntsKernel), dim3(CUDA_BLOCKS(device_id, count)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, seed, globalState, output, count); return; } template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<int32_t>(hiprandStatePhilox4_32_10_t *globalState, int32_t *output, size_t count, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<int64_t>(hiprandStatePhilox4_32_10_t *globalState, int64_t *output, size_t count, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<uint32_t>(hiprandStatePhilox4_32_10_t *globalState, uint32_t *output, size_t count, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<uint64_t>(hiprandStatePhilox4_32_10_t *globalState, uint64_t *output, size_t count, const uint32_t &device_id, hipStream_t cuda_stream);
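// ---- Illustrative sketch only; not taken from the NonDeterministicInts file pair above ----
// The 64-bit specializations above build each output from two 32-bit Philox draws: the
// high word shifted up by 32 and the low word OR'd in. A host-side spelling of that
// packing (using uint64_t so the shift never touches a sign bit), just to make the
// arithmetic explicit:
#include <cstdint>
static inline uint64_t pack_two_draws(uint32_t hi, uint32_t lo) {
    return (static_cast<uint64_t>(hi) << 32) | lo;   // e.g. i4.x and i4.y in the kernel
}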
dca6039f7b32e56668142caf0f77b8a3a30a682e.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "non_deterministic_ints_impl.cuh" template<typename T> __global__ void NonDeterministicIntsKernel(int seed, curandStatePhilox4_32_10_t *globalState, T *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 4); i += blockDim.x * gridDim.x) { curand_init(seed, i, 0, &globalState[i]); uint4 i4 = curand4(&globalState[i]); output[i * 4] = i4.x; output[i * 4 + 1] = i4.y; output[i * 4 + 2] = i4.z; output[i * 4 + 3] = i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { curand_init(seed, 0, 0, &globalState[0]); uint4 i4 = curand4(&globalState[0]); size_t val = count % 4; for (size_t i = 0; i < val; i++) { output[count-i-1] = (&i4.x)[i]; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, curandStatePhilox4_32_10_t *globalState, int32_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 4); i += blockDim.x * gridDim.x) { curand_init(seed, i, 0, &globalState[i]); uint4 i4 = curand4(&globalState[i]); output[i * 4] = i4.x; output[i * 4 + 1] = i4.y; output[i * 4 + 2] = i4.z; output[i * 4 + 3] = i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { curand_init(seed, 0, 0, &globalState[0]); uint4 i4 = curand4(&globalState[0]); size_t val = count % 4; for (size_t i = 0; i < val; i++) { output[count-i-1] = (&i4.x)[i]; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, curandStatePhilox4_32_10_t *globalState, int64_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 2); i += blockDim.x * gridDim.x) { curand_init(seed, i, 0, &globalState[i]); uint4 i4 = curand4(&globalState[i]); output[i * 2] = ((int64_t)i4.x << 32) | i4.y; output[i * 2 + 1] = ((int64_t)i4.z << 32) | i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { curand_init(seed, 0, 0, &globalState[0]); uint4 i4 = curand4(&globalState[0]); if (count & 1) { output[count-1] = ((int64_t)i4.x << 32) | i4.y; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, curandStatePhilox4_32_10_t *globalState, uint32_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 4); i += blockDim.x * gridDim.x) { curand_init(seed, i, 0, &globalState[i]); uint4 i4 = curand4(&globalState[i]); output[i * 4] = i4.x; output[i * 4 + 1] = i4.y; output[i * 4 + 2] = i4.z; output[i * 4 + 3] = i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { curand_init(seed, 0, 0, &globalState[0]); uint4 i4 = curand4(&globalState[0]); size_t val = count % 4; for (size_t i = 0; i < val; i++) { output[count-i-1] = (&i4.x)[i]; } } return; } template<> __global__ void NonDeterministicIntsKernel(int seed, curandStatePhilox4_32_10_t *globalState, uint64_t *output, size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count / 2); i += blockDim.x * gridDim.x) { curand_init(seed, i, 0, &globalState[i]); uint4 i4 = 
curand4(&globalState[i]); output[i * 2] = ((int64_t)i4.x << 32) | i4.y; output[i * 2 + 1] = ((int64_t)i4.z << 32) | i4.w; } if (blockIdx.x * blockDim.x + threadIdx.x == 0) { curand_init(seed, 0, 0, &globalState[0]); uint4 i4 = curand4(&globalState[0]); if (count & 1) { output[count-1] = ((int64_t)i4.x << 32) | i4.y; } } return; } template<typename T> void LaunchNonDeterministicInts(curandStatePhilox4_32_10_t *globalState, T *output, size_t count, const uint32_t &device_id, cudaStream_t cuda_stream) { std::random_device rd; int seed = static_cast<int>(rd()); NonDeterministicIntsKernel<<<CUDA_BLOCKS(device_id, count), CUDA_THREADS(device_id), 0, cuda_stream>>>(seed, globalState, output, count); return; } template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<int32_t>(curandStatePhilox4_32_10_t *globalState, int32_t *output, size_t count, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<int64_t>(curandStatePhilox4_32_10_t *globalState, int64_t *output, size_t count, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<uint32_t>(curandStatePhilox4_32_10_t *globalState, uint32_t *output, size_t count, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void LaunchNonDeterministicInts<uint64_t>(curandStatePhilox4_32_10_t *globalState, uint64_t *output, size_t count, const uint32_t &device_id, cudaStream_t cuda_stream);
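The int64_t and uint64_t kernels above build each 64-bit output by packing two 32-bit Philox draws from a single curand4 call. Below is a minimal standalone sketch of that draw-and-pack step; only the packing expression is taken from the file, while the kernel name, launch shape, and per-thread (register-resident) state are illustrative rather than the launcher's globalState buffer.

#include <cstdio>
#include <cstdint>
#include <curand_kernel.h>

// Each thread seeds its own Philox state, draws four 32-bit values with
// curand4, and packs them into two 64-bit outputs the same way the
// int64_t/uint64_t specializations above do.
__global__ void PackPhilox64Demo(unsigned long long seed, uint64_t *out, size_t count) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= count / 2) return;
  curandStatePhilox4_32_10_t state;
  curand_init(seed, i, 0, &state);            // seed, subsequence, offset
  uint4 v = curand4(&state);                  // four independent 32-bit draws
  out[i * 2]     = (static_cast<uint64_t>(v.x) << 32) | v.y;
  out[i * 2 + 1] = (static_cast<uint64_t>(v.z) << 32) | v.w;
}

int main() {
  const size_t count = 8;                     // even count, so no tail handling needed here
  uint64_t h_out[count];
  uint64_t *d_out = nullptr;
  cudaMalloc((void**)&d_out, count * sizeof(uint64_t));
  PackPhilox64Demo<<<1, 32>>>(12345ULL, d_out, count);
  cudaMemcpy(h_out, d_out, count * sizeof(uint64_t), cudaMemcpyDeviceToHost);
  for (size_t i = 0; i < count; ++i) {
    printf("%llu\n", (unsigned long long)h_out[i]);
  }
  cudaFree(d_out);
  return 0;
}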
517d77f4aee0c34c862afc87584a00146c62abe7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "test.cuh" #include <iostream> #include <algorithm> using namespace std; unsigned char *d_screen; int16_t *d_vol; OctreeNode* d_octree; __device__ __constant__ int window_level = 45; __device__ int window_width; __device__ float getIntensity(int16_t* d_vol, int width, int height, int depth, float x, float y, float z) { float val = d_vol[(int)floor(z) * width * height + (int)floor(y)* width + (int)floor(x)]; // TODO: global variables are always zero. Why? //printf("%d %d\n", window_level, window_width); float window_min = 50 - 350 / 2; float window_max = 50 + 350 / 2; if (val < window_min) val = window_min; if (val > window_max) val = window_max; val = (val - window_min) / (float)(window_max - window_min); return val; } __device__ bool intersect(glm::vec4 origin, glm::vec4 dir, glm::vec4* bounds, float &t) { glm::vec4 inv_dir(1. / dir[0], 1. / dir[1], 1. / dir[2], 0.); double t1 = (bounds[0][0] - origin[0])*inv_dir[0]; double t2 = (bounds[1][0] - origin[0])*inv_dir[0]; double tmin = min(t1, t2); double tmax = max(t1, t2); for (int i = 1; i < 3; ++i) { t1 = (bounds[0][i] - origin[i])*inv_dir[i]; t2 = (bounds[1][i] - origin[i])*inv_dir[i]; tmin = max(tmin, min(t1, t2)); tmax = min(tmax, max(t1, t2)); } t = tmin; return tmax > max(tmin, 0.0); } // Raycast into the volume __device__ glm::vec4 rayCast(int16_t* d_vol, glm::vec4 origin, glm::vec4 dir, int width, int height, int depth) { // Find start and end points. glm::vec4 start, end, min_bound, max_bound; min_bound = glm::vec4(-width / 2.f, -height / 2.f, -depth / 2.f, 1); max_bound = glm::vec4(width / 2.f, height / 2.f, depth / 2.f, 1); glm::vec4 bounds[2]; bounds[0] = min_bound; bounds[1] = max_bound; float t; bool is_intersect = intersect(origin, dir, bounds, t); if (!is_intersect) { // RGBA color glm::vec4 color(0, 0, 0, 255); return color; } float max_val = 0.0f; // Sampling from start to end glm::vec4 cur = origin + dir * t; while (true) { // Terminate condition if (cur.x > max_bound.x || cur.x < min_bound.x || cur.y > max_bound.y || cur.y < min_bound.y || cur.z > max_bound.z || cur.z < min_bound.z) break; // Get origin coordinates float x = cur.x + width / 2.f; float y = cur.y + height / 2.f; float z = cur.z + depth / 2.f; if (x < 0 || y < 0 || z < 0 || x >= width || y >= height || z >= depth) { // Ray go cur.x += dir.x; cur.y += dir.y; cur.z += dir.z; continue; } float val = getIntensity(d_vol, width, height, depth, x, y, z); if (max_val < val) max_val = val; // Ray go cur.x += dir.x; cur.y += dir.y; cur.z += dir.z; } // RGBA color glm::vec4 color(max_val * 255, max_val * 255, max_val * 255, 255); return color; } __global__ void render(int16_t* d_vol, unsigned char *d_screen, int width, int height, int depth, glm::vec3 scr_corner, glm::vec3 scr_delta_x, glm::vec3 scr_delta_y, glm::vec4 ray_dir) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx = idx_y * 1000 * 4 + idx_x * 4; // 1000 means width, 4 means channel glm::vec3 cur = scr_corner + (float)idx_x * scr_delta_x + (float)idx_y * scr_delta_y; glm::vec4 origin(cur, 1); glm::vec4 color = rayCast(d_vol, origin, ray_dir, width, height, depth); d_screen[idx + 0] = color.x; d_screen[idx + 1] = color.y; d_screen[idx + 2] = color.z; d_screen[idx + 3] = color.w; } void allocateScreenCuda(int scr_width, int scr_height) { hipError_t rc = hipMalloc((void**)&d_screen, sizeof(unsigned char) * scr_width * scr_height * 4); if (rc != 
hipSuccess) printf("Could not allocate memory: %d\n", rc); } void copyVolCuda(vdcm::Volume* vol) { std::vector<std::vector<int16_t> > *raw_data = &(vol->m_volume_data); int width = vol->getWidth(); int height = vol->getHeight(); int depth = vol->getDepth(); // Allocate volume data in gpu. hipMalloc((void**)&d_vol, sizeof(int16_t) * width * height * depth); int16_t *dst = d_vol; for (std::vector<std::vector<int16_t> >::iterator it = raw_data->begin(); it != raw_data->end(); ++it) { int16_t *src = &((*it)[0]); size_t sz = it->size(); hipMemcpy(dst, src, sizeof(int16_t)*sz, hipMemcpyHostToDevice); dst += sz; } } void copyOctree(Octree* tree) { hipMalloc((void**)&d_octree, sizeof(OctreeNode) * tree->size); hipMemcpy(d_octree, tree->root, sizeof(OctreeNode)*tree->size, hipMemcpyHostToDevice); } int rayCastCuda(vdcm::Volume* vol, glm::vec3 scr_center, glm::vec3 scr_delta_x, glm::vec3 scr_delta_y, int scr_width, int scr_height, unsigned char *h_screen) { //float *raw_data = vol->getBuffer(); //allocateScreenCuda(scr_width, scr_height); //copyVolCuda(vol); //printf("Get volume\n"); int wl = std::get<0>(vol->getDefaultWindowing()); window_width = std::get<1>(vol->getDefaultWindowing()); printf("%d %d\n", wl, window_width); //hipMalloc((void**)&window_level, sizeof(int)); hipMemcpyToSymbol(&window_level, &wl, sizeof(int),0,hipMemcpyHostToDevice); int width = vol->getWidth(); int height = vol->getHeight(); int depth = vol->getDepth(); dim3 block_size(16, 16, 1); dim3 numBlocks(scr_width / block_size.x, scr_height / block_size.y); glm::vec4 volume_center(0, 0, 0, 1); glm::vec4 direction; glm::vec3 scr_start_corner; // Start point left bottom scr_start_corner = scr_center - (scr_width / 2.f) * scr_delta_x - (scr_height / 2.f) * scr_delta_y; // Assume that only consider parallel ray direction = volume_center - glm::vec4(scr_center, 1.); direction = glm::normalize(direction); render << <numBlocks, block_size >> > (d_vol, d_screen, width, height, depth, scr_start_corner, scr_delta_x, scr_delta_y, direction); hipStreamSynchronize(0); hipMemcpy(h_screen, d_screen, sizeof(unsigned char) * scr_width * scr_height * 4, hipMemcpyDeviceToHost); //hipFree(d_screen); //hipFree(d_vol); return true; }
517d77f4aee0c34c862afc87584a00146c62abe7.cu
#include "test.cuh" #include <iostream> #include <algorithm> using namespace std; unsigned char *d_screen; int16_t *d_vol; OctreeNode* d_octree; __device__ __constant__ int window_level = 45; __device__ int window_width; __device__ float getIntensity(int16_t* d_vol, int width, int height, int depth, float x, float y, float z) { float val = d_vol[(int)floor(z) * width * height + (int)floor(y)* width + (int)floor(x)]; // TODO: global variables are always zero. Why? //printf("%d %d\n", window_level, window_width); float window_min = 50 - 350 / 2; float window_max = 50 + 350 / 2; if (val < window_min) val = window_min; if (val > window_max) val = window_max; val = (val - window_min) / (float)(window_max - window_min); return val; } __device__ bool intersect(glm::vec4 origin, glm::vec4 dir, glm::vec4* bounds, float &t) { glm::vec4 inv_dir(1. / dir[0], 1. / dir[1], 1. / dir[2], 0.); double t1 = (bounds[0][0] - origin[0])*inv_dir[0]; double t2 = (bounds[1][0] - origin[0])*inv_dir[0]; double tmin = min(t1, t2); double tmax = max(t1, t2); for (int i = 1; i < 3; ++i) { t1 = (bounds[0][i] - origin[i])*inv_dir[i]; t2 = (bounds[1][i] - origin[i])*inv_dir[i]; tmin = max(tmin, min(t1, t2)); tmax = min(tmax, max(t1, t2)); } t = tmin; return tmax > max(tmin, 0.0); } // Raycast into the volume __device__ glm::vec4 rayCast(int16_t* d_vol, glm::vec4 origin, glm::vec4 dir, int width, int height, int depth) { // Find start and end points. glm::vec4 start, end, min_bound, max_bound; min_bound = glm::vec4(-width / 2.f, -height / 2.f, -depth / 2.f, 1); max_bound = glm::vec4(width / 2.f, height / 2.f, depth / 2.f, 1); glm::vec4 bounds[2]; bounds[0] = min_bound; bounds[1] = max_bound; float t; bool is_intersect = intersect(origin, dir, bounds, t); if (!is_intersect) { // RGBA color glm::vec4 color(0, 0, 0, 255); return color; } float max_val = 0.0f; // Sampling from start to end glm::vec4 cur = origin + dir * t; while (true) { // Terminate condition if (cur.x > max_bound.x || cur.x < min_bound.x || cur.y > max_bound.y || cur.y < min_bound.y || cur.z > max_bound.z || cur.z < min_bound.z) break; // Get origin coordinates float x = cur.x + width / 2.f; float y = cur.y + height / 2.f; float z = cur.z + depth / 2.f; if (x < 0 || y < 0 || z < 0 || x >= width || y >= height || z >= depth) { // Ray go cur.x += dir.x; cur.y += dir.y; cur.z += dir.z; continue; } float val = getIntensity(d_vol, width, height, depth, x, y, z); if (max_val < val) max_val = val; // Ray go cur.x += dir.x; cur.y += dir.y; cur.z += dir.z; } // RGBA color glm::vec4 color(max_val * 255, max_val * 255, max_val * 255, 255); return color; } __global__ void render(int16_t* d_vol, unsigned char *d_screen, int width, int height, int depth, glm::vec3 scr_corner, glm::vec3 scr_delta_x, glm::vec3 scr_delta_y, glm::vec4 ray_dir) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx = idx_y * 1000 * 4 + idx_x * 4; // 1000 means width, 4 means channel glm::vec3 cur = scr_corner + (float)idx_x * scr_delta_x + (float)idx_y * scr_delta_y; glm::vec4 origin(cur, 1); glm::vec4 color = rayCast(d_vol, origin, ray_dir, width, height, depth); d_screen[idx + 0] = color.x; d_screen[idx + 1] = color.y; d_screen[idx + 2] = color.z; d_screen[idx + 3] = color.w; } void allocateScreenCuda(int scr_width, int scr_height) { cudaError_t rc = cudaMalloc((void**)&d_screen, sizeof(unsigned char) * scr_width * scr_height * 4); if (rc != cudaSuccess) printf("Could not allocate memory: %d\n", rc); } void copyVolCuda(vdcm::Volume* 
vol) { std::vector<std::vector<int16_t> > *raw_data = &(vol->m_volume_data); int width = vol->getWidth(); int height = vol->getHeight(); int depth = vol->getDepth(); // Allocate volume data in gpu. cudaMalloc((void**)&d_vol, sizeof(int16_t) * width * height * depth); int16_t *dst = d_vol; for (std::vector<std::vector<int16_t> >::iterator it = raw_data->begin(); it != raw_data->end(); ++it) { int16_t *src = &((*it)[0]); size_t sz = it->size(); cudaMemcpy(dst, src, sizeof(int16_t)*sz, cudaMemcpyHostToDevice); dst += sz; } } void copyOctree(Octree* tree) { cudaMalloc((void**)&d_octree, sizeof(OctreeNode) * tree->size); cudaMemcpy(d_octree, tree->root, sizeof(OctreeNode)*tree->size, cudaMemcpyHostToDevice); } int rayCastCuda(vdcm::Volume* vol, glm::vec3 scr_center, glm::vec3 scr_delta_x, glm::vec3 scr_delta_y, int scr_width, int scr_height, unsigned char *h_screen) { //float *raw_data = vol->getBuffer(); //allocateScreenCuda(scr_width, scr_height); //copyVolCuda(vol); //printf("Get volume\n"); int wl = std::get<0>(vol->getDefaultWindowing()); window_width = std::get<1>(vol->getDefaultWindowing()); printf("%d %d\n", wl, window_width); //cudaMalloc((void**)&window_level, sizeof(int)); cudaMemcpyToSymbol(&window_level, &wl, sizeof(int),0,cudaMemcpyHostToDevice); int width = vol->getWidth(); int height = vol->getHeight(); int depth = vol->getDepth(); dim3 block_size(16, 16, 1); dim3 numBlocks(scr_width / block_size.x, scr_height / block_size.y); glm::vec4 volume_center(0, 0, 0, 1); glm::vec4 direction; glm::vec3 scr_start_corner; // Start point left bottom scr_start_corner = scr_center - (scr_width / 2.f) * scr_delta_x - (scr_height / 2.f) * scr_delta_y; // Assume that only consider parallel ray direction = volume_center - glm::vec4(scr_center, 1.); direction = glm::normalize(direction); render << <numBlocks, block_size >> > (d_vol, d_screen, width, height, depth, scr_start_corner, scr_delta_x, scr_delta_y, direction); cudaStreamSynchronize(0); cudaMemcpy(h_screen, d_screen, sizeof(unsigned char) * scr_width * scr_height * 4, cudaMemcpyDeviceToHost); //cudaFree(d_screen); //cudaFree(d_vol); return true; }
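The TODO inside getIntensity ("global variables are always zero") is consistent with how the two globals are written in rayCastCuda: cudaMemcpyToSymbol receives &window_level rather than the symbol itself, and window_width is assigned directly from host code. As a point of comparison, here is a standalone sketch of the usual cudaMemcpyToSymbol pattern for __constant__ and __device__ globals; the demo_ symbols are hypothetical stand-ins, and the sketch is not a patch to the file above.

#include <cstdio>

__constant__ int demo_window_level = 45;   // stand-ins for window_level / window_width above
__device__ int demo_window_width;

__global__ void ShowWindowing() {
  printf("level=%d width=%d\n", demo_window_level, demo_window_width);
}

int main() {
  int wl = 50, ww = 350;
  // The first argument is the device symbol itself (not its address); offset
  // and kind default to 0 and cudaMemcpyHostToDevice in the 3-argument form.
  cudaMemcpyToSymbol(demo_window_level, &wl, sizeof(int));
  cudaMemcpyToSymbol(demo_window_width, &ww, sizeof(int));
  ShowWindowing<<<1, 1>>>();
  cudaDeviceSynchronize();
  return 0;
}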
d76d3da032228a535442ae252875b2a98cf8f875.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <cassert> #include <chrono> #include "kernels.h" #include "IO.hpp" #include "Show.hpp" ///////////////////////////////////////////////////////////////////////////// // GPU CODE ///////////////////////////////////////////////////////////////////////////// // texture<RefNumberType> texVec; // texture<RefNumberType> texA; inline unsigned PowerOfTwoAlign( unsigned int n ) { int PowerOfTwoAlign = 1; while( PowerOfTwoAlign < n ) PowerOfTwoAlign = PowerOfTwoAlign << 1; return PowerOfTwoAlign; } //*/ // KERNEL WITH BLOCKS. OK __global__ void GPU_DENSE_PR( RefNumberType* const dataA, const RefNumberType* const vecOld, RefNumberType* const vec, const double d ) { // row loop runs with blockIdx.x const double InvFactor = 1/((double) gridDim.x ); double sum = 0; for( size_t j = 0; j<gridDim.x; ++j ) { // sum += A[row][j]*pold[j]; sum += dataA[blockIdx.x*gridDim.x + j]*vecOld[j]; } // sum now contains the scalar product A[row,:] DOT p vec[blockIdx.x] = d*sum + (1-d)*InvFactor; } // KERNEL WITH THREADS OK AND FASTER __global__ void GPU_DENSE_PR_2( RefNumberType* const dataA, const RefNumberType* const vecOld, RefNumberType* const vec, const double d, int n ) { // loop runs with threads int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = 0; for( size_t j = 0; j<n; ++j ) { // sum += A[row][j]*pold[j]; sum += dataA[tid*n + j]*vecOld[j]; } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } // KERNEL WITH THREADS + TEXTURE MEMORY __global__ void GPU_DENSE_PR_3( RefNumberType* const dataA, // const RefNumberType* const vecOld, RefNumberType* const vec, const double d, int n ) { // row loop runs with blockIdx.x int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = 0; for( size_t j = 0; j<n; ++j ) { // sum += A[row][j]*pold[j]; // sum += dataA[threadIdx.x*blockDim.x + j]*vecOld[j]; // sum += dataA[tid*n + j]*tex1Dfetch(texVec, (int) j); // sum += tex1Dfetch(texA, tid*n + j)*tex1Dfetch(texVec, j); } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } // KERNEL WITH THREADS __global__ void GPU_CSR_PR( const RefNumberType* const data, const size_t* const row_ptr, const size_t* const col_idx, const RefNumberType* const vecOld, RefNumberType* const vec, const double d, int n ) { // row loop runs with blockIdx.x int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = 0; for( size_t idx = row_ptr[tid]; idx < row_ptr[tid+1]; ++idx ) { sum += data[idx]*vecOld[ col_idx[idx] ]; } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } // KERNEL WITH THREADS __global__ void GPU_CSR_PR_PartialSum( const RefNumberType* const data, const size_t* const row_ptr, const size_t* const col_idx, const RefNumberType* const vecOld, const RefNumberType* const partialSum, RefNumberType* const vec, const double d, int n ) { // row loop runs with blockIdx.x int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = (*partialSum); for( size_t idx = row_ptr[tid]; idx < row_ptr[tid+1]; ++idx ) { sum += data[idx]*vecOld[ col_idx[idx] ]; } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } __global__ void 
GPU_Reduce_PartialSum( const RefNumberType* const vecOld, const size_t* const MaskLine, size_t MaskLine_size, RefNumberType* const result, RefNumberType defaultValue ) { (*result) = 0; for( size_t i = 0; i<MaskLine_size; ++i) { (*result) += vecOld[ MaskLine[i] ]; } (*result) *= defaultValue; } __global__ void GPU_Reduce_Error( RefNumberType* const tmpErr, RefNumberType* const vecOld, const RefNumberType* const vec, int n ) { (*tmpErr) = 0; for( size_t row = 0; row < n; ++row) { // on the fly compute norm( pold - pnext, 2) i.e. L2-norm between the current and last iteration (*tmpErr) += (vec[row]-vecOld[row])*(vec[row]-vecOld[row]); } } const int THREADS_PER_BLOCK = 1024; static_assert( THREADS_PER_BLOCK == 1024 , "Power of two value required!"); // int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; __global__ void GPU_Reduce_Error_2( RefNumberType* const partial_tmpErr, const RefNumberType* const vecOld, const RefNumberType* const vec, int n ) { //assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); __shared__ RefNumberType cache[THREADS_PER_BLOCK]; int tid = threadIdx.x + blockIdx.x * blockDim.x; float tmp = 0; while( tid < n ) { tmp += (vec[tid]-vecOld[tid])*(vec[tid]-vecOld[tid]); tid += blockDim.x * gridDim.x; } cache[threadIdx.x] = tmp; __syncthreads(); int i = blockDim.x / 2; while( i!= 0) { if( threadIdx.x < i ) { cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i/=2; } if( threadIdx.x == 0) { //(*tmpErr) = cache[0]; partial_tmpErr[ blockIdx.x ] = cache[0]; } } void showVec( RefNumberType* devPtr, int n ) { RefNumberType* tmpData = new RefNumberType[n]; hipMemcpy( tmpData, devPtr, n*sizeof(RefNumberType), hipMemcpyDeviceToHost ); for( int i = 0; i < n; ++ i ) { printf("\t%.10e", (RefNumberType) tmpData[i]); } delete[] tmpData; printf("\n\n"); } template<typename T> void showVecT( T* devPtr, int n ) { T* tmpData = new T[n]; hipMemcpy( tmpData, devPtr, n*sizeof(T), hipMemcpyDeviceToHost ); for( int i = 0; i < n; ++ i ) { printf("\t%.10e", (T) tmpData[i]); } delete[] tmpData; printf("\n\n"); } template<typename T> void showVecTu( T* devPtr, int n ) { T* tmpData = new T[n]; hipMemcpy( tmpData, devPtr, n*sizeof(T), hipMemcpyDeviceToHost ); for( int i = 0; i < n; ++ i ) { printf("\t%u", (T) tmpData[i]); } delete[] tmpData; printf("\n\n"); } std::vector<RefNumberType> GPU_PageRank_Dense( std::vector<std::vector<RefNumberType> > const &A, RefNumberType d, RefNumberType eps ) { printf("GPU - CODE, n = %u \n", A.size() ); //squared matrix required (n x n) assert( A.size() == A[0].size() ); unsigned k = 0; size_t n = A.size(); double InvFactor = 1/((double) n); RefNumberType tmpErr = 2*eps; int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_dataA; hipMalloc( (void**) &dev_dataA, n*n*sizeof(RefNumberType) ); RefNumberType *dev_vec; hipMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; hipMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; hipMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); // TEXTURE - MEMORY // hipBindTexture( NULL, texVec, dev_vecOld, n*sizeof(RefNumberType) ); // hipBindTexture( NULL, texA, dev_dataA, n*n*sizeof(RefNumberType) ); // hipEvent_t start, stop; // hipEventCreate( &start ); // hipEventCreate( &stop ); 
//------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ RefNumberType* dataA = new RefNumberType[n*n]; MatrixToFlatData( dataA, A ); hipMemcpy( dev_dataA, dataA, n*n*sizeof(RefNumberType), hipMemcpyHostToDevice ); delete[] dataA; // uniform vecotor of length n, with values 1/n at all positions RefNumberType* init_vec_data = new RefNumberType[n]; for( int i = 0; i < n; ++ i ) { init_vec_data[i] = InvFactor; } hipMemcpy( dev_vec, init_vec_data, n*sizeof(RefNumberType), hipMemcpyHostToDevice ); delete[] init_vec_data; //float time_kernel = 0; //float time_kernel2 = 0; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; while( tmpErr > eps ) { // compute p = dAp + (1-d).*1/n.*[1 1 ... 1]; // O(n^2): dense matrix-vector multiplication // printf("BEFORE\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); hipMemcpy( dev_vecOld, dev_vec, n*sizeof(RefNumberType), hipMemcpyDeviceToDevice ); // printf("after copy\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // hipEventRecord( start, 0 ); // GPU_DENSE_PR<<<n,1>>>( dev_dataA, dev_vecOld, dev_vec, d ); hipLaunchKernelGGL(( GPU_DENSE_PR_2), dim3((n+31)/32), dim3(32), 0, 0, dev_dataA, dev_vecOld, dev_vec, d, n ); // GPU_DENSE_PR_3<<< (n+31)/32, 32>>>( dev_dataA, /*dev_vecOld,*/ dev_vec, d, n ); // hipEventRecord( stop, 0 ); // hipEventSynchronize( stop ); // float deltatime; // hipEventElapsedTime( &deltatime, start, stop ); // time_kernel += deltatime; // printf("AFTER\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // hipEventRecord( start, 0 ); // printf("n = %i\n", n ); // assert( n == THREADS_PER_BLOCK); // GPU_Reduce_Error<<<1,1>>>( dev_tmpErr, dev_vecOld, dev_vec, n); assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); hipLaunchKernelGGL(( GPU_Reduce_Error_2), dim3(BlocksPerGrid), dim3(THREADS_PER_BLOCK) , 0, 0, dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD AND COMPLETE ERROR REDUCTION //------------------------------------------------------------------ tmpErr = 0; hipMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), hipMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } // hipEventRecord( stop, 0 ); // hipEventSynchronize( stop ); // hipEventElapsedTime( &deltatime, start, stop ); // time_kernel2 += deltatime; tmpErr = sqrt( tmpErr ); printf("[k = %u]: %e\n", k++, tmpErr ); // if( k > 1000 ) break; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType* tmpData = new RefNumberType[n]; hipMemcpy( tmpData, dev_vec, n*sizeof(RefNumberType), hipMemcpyDeviceToHost ); std::vector<RefNumberType> ret; ret.assign(tmpData, tmpData + n); delete[] tmpData; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ // hipEventDestroy( start ); // hipEventDestroy( stop ); // hipUnbindTexture( texVec ); // hipUnbindTexture( texA ); hipFree( dev_dataA ); hipFree( dev_vec ); hipFree( dev_vecOld ); hipFree( dev_partial_tmpErr ); return ret; } std::vector<RefNumberType> GPU_PageRank_CSR( CSRType<RefNumberType> const &S, size_t n, RefNumberType d, RefNumberType eps ) { printf("GPU - CODE, nZZ = %u \n", S.data.size() ); 
unsigned k = 0; double InvFactor = 1/((double) n); RefNumberType tmpErr = 2*eps; int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_data; hipMalloc( (void**) &dev_data, S.data.size()*sizeof(RefNumberType) ); size_t *dev_row_ptr; hipMalloc( (void**) &dev_row_ptr, S.row_ptr.size()*sizeof(size_t) ); size_t *dev_col_idx; hipMalloc( (void**) &dev_col_idx, S.col_idx.size()*sizeof(size_t) ); RefNumberType *dev_vec; hipMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; hipMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; hipMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); // TEXTURE - MEMORY // hipBindTexture( NULL, texVec, dev_vecOld, n*sizeof(RefNumberType) ); // hipBindTexture( NULL, texA, dev_dataA, n*n*sizeof(RefNumberType) ); //------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ RefNumberType* tmp1 = new RefNumberType[ S.data.size() ]; std::copy( S.data.begin(), S.data.end(), tmp1 ); hipMemcpy( dev_data, tmp1, S.data.size()*sizeof(RefNumberType), hipMemcpyHostToDevice ); delete[] tmp1; size_t* tmp2 = new size_t[ S.row_ptr.size() ]; std::copy( S.row_ptr.begin(), S.row_ptr.end(), tmp2 ); hipMemcpy( dev_row_ptr, tmp2, S.row_ptr.size()*sizeof(size_t), hipMemcpyHostToDevice ); delete[] tmp2; size_t* tmp3 = new size_t[ S.col_idx.size() ]; std::copy( S.col_idx.begin(), S.col_idx.end(), tmp3 ); hipMemcpy( dev_col_idx, tmp3, S.col_idx.size()*sizeof(size_t), hipMemcpyHostToDevice ); delete[] tmp3; RefNumberType* init_vec_data = new RefNumberType[n]; for( int i = 0; i < n; ++ i ) { init_vec_data[i] = InvFactor; } hipMemcpy( dev_vec, init_vec_data, n*sizeof(RefNumberType), hipMemcpyHostToDevice ); delete[] init_vec_data; // float time_kernel = 0; // float time_kernel2 = 0; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; while( tmpErr > eps ) { // copy current state to old vector. 
hipMemcpy( dev_vecOld, dev_vec, n*sizeof(RefNumberType), hipMemcpyDeviceToDevice ); // printf("after copy\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // hipEventRecord( start, 0 ); hipLaunchKernelGGL(( GPU_CSR_PR), dim3((n+31)/32), dim3(32), 0, 0, dev_data, dev_row_ptr, dev_col_idx, dev_vecOld, dev_vec, d, n ); // hipEventRecord( stop, 0 ); // hipEventSynchronize( stop ); // float deltatime; // hipEventElapsedTime( &deltatime, start, stop ); // time_kernel += deltatime; // printf("AFTER\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // hipEventRecord( start, 0 ); // printf("n = %i\n", n ); // assert( n == THREADS_PER_BLOCK); // GPU_Reduce_Error<<<1,1>>>( dev_tmpErr, dev_vecOld, dev_vec, n); assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); hipLaunchKernelGGL(( GPU_Reduce_Error_2), dim3(BlocksPerGrid), dim3(THREADS_PER_BLOCK) , 0, 0, dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD AND COMPLETE ERROR REDUCTION //------------------------------------------------------------------ tmpErr = 0; hipMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), hipMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } // hipEventRecord( stop, 0 ); // hipEventSynchronize( stop ); // hipEventElapsedTime( &deltatime, start, stop ); // time_kernel2 += deltatime; tmpErr = sqrt( tmpErr ); printf("[k = %u]: %e\n", k++, tmpErr ); // if( k > 1000 ) break; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType* tmpData = new RefNumberType[n]; hipMemcpy( tmpData, dev_vec, n*sizeof(RefNumberType), hipMemcpyDeviceToHost ); std::vector<RefNumberType> ret; ret.assign(tmpData, tmpData + n); delete[] tmpData; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ // hipEventDestroy( start ); // hipEventDestroy( stop ); //hipUnbindTexture( texVec ); //hipUnbindTexture( texA ); hipFree( dev_data ); hipFree( dev_row_ptr ); hipFree( dev_col_idx ); hipFree( dev_vec ); hipFree( dev_vecOld ); hipFree( dev_partial_tmpErr ); return ret; } std::vector<RefNumberType> GPU_PageRank_CSR_OPT( CSRType<RefNumberType> const &S, size_t n, std::vector<size_t> const &MaskLine, RefNumberType defaultValue, RefNumberType d, RefNumberType eps ) { printf("GPU - CODE, nZZ = %u \n", S.data.size() ); unsigned k = 0; double InvFactor = 1/((double) n); RefNumberType tmpErr = 2*eps; int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_data; hipMalloc( (void**) &dev_data, S.data.size()*sizeof(RefNumberType) ); size_t *dev_row_ptr; hipMalloc( (void**) &dev_row_ptr, S.row_ptr.size()*sizeof(size_t) ); size_t *dev_col_idx; hipMalloc( (void**) &dev_col_idx, S.col_idx.size()*sizeof(size_t) ); size_t *dev_MaskLine; hipMalloc( (void**) &dev_MaskLine, MaskLine.size()*sizeof(size_t) ); RefNumberType *dev_vec; hipMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; hipMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; hipMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); 
RefNumberType *dev_partialSum; hipMalloc( (void**) &dev_partialSum, sizeof(RefNumberType) ); // TEXTURE - MEMORY // hipBindTexture( NULL, texVec, dev_vecOld, n*sizeof(RefNumberType) ); // hipBindTexture( NULL, texA, dev_dataA, n*n*sizeof(RefNumberType) ); //------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ RefNumberType* tmp1 = new RefNumberType[ S.data.size() ]; std::copy( S.data.begin(), S.data.end(), tmp1 ); hipMemcpy( dev_data, tmp1, S.data.size()*sizeof(RefNumberType), hipMemcpyHostToDevice ); delete[] tmp1; size_t* tmp2 = new size_t[ S.row_ptr.size() ]; std::copy( S.row_ptr.begin(), S.row_ptr.end(), tmp2 ); hipMemcpy( dev_row_ptr, tmp2, S.row_ptr.size()*sizeof(size_t), hipMemcpyHostToDevice ); delete[] tmp2; size_t* tmp3 = new size_t[ S.col_idx.size() ]; std::copy( S.col_idx.begin(), S.col_idx.end(), tmp3 ); hipMemcpy( dev_col_idx, tmp3, S.col_idx.size()*sizeof(size_t), hipMemcpyHostToDevice ); delete[] tmp3; size_t* tmp4 = new size_t[ S.col_idx.size() ]; std::copy( MaskLine.begin(), MaskLine.end(), tmp4 ); hipMemcpy( dev_MaskLine, tmp4, MaskLine.size()*sizeof(size_t), hipMemcpyHostToDevice ); delete[] tmp4; RefNumberType* init_vec_data = new RefNumberType[n]; for( int i = 0; i < n; ++ i ) { init_vec_data[i] = InvFactor; } hipMemcpy( dev_vec, init_vec_data, n*sizeof(RefNumberType), hipMemcpyHostToDevice ); delete[] init_vec_data; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; while( tmpErr > eps ) { // copy current state to old vector. hipMemcpy( dev_vecOld, dev_vec, n*sizeof(RefNumberType), hipMemcpyDeviceToDevice ); hipLaunchKernelGGL(( GPU_Reduce_PartialSum), dim3(1),dim3(1), 0, 0, dev_vecOld, dev_MaskLine, MaskLine.size(), dev_partialSum, defaultValue ); hipLaunchKernelGGL(( GPU_CSR_PR_PartialSum), dim3((n+31)/32), dim3(32), 0, 0, dev_data, dev_row_ptr, dev_col_idx, dev_vecOld, dev_partialSum, dev_vec, d, n ); assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); hipLaunchKernelGGL(( GPU_Reduce_Error_2), dim3(BlocksPerGrid), dim3(THREADS_PER_BLOCK) , 0, 0, dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD AND COMPLETE ERROR REDUCTION //------------------------------------------------------------------ tmpErr = 0; hipMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), hipMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } tmpErr = sqrt( tmpErr ); printf("[k = %u]: %e\n", k++, tmpErr ); // if( k > 1000 ) break; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType* tmpData = new RefNumberType[n]; hipMemcpy( tmpData, dev_vec, n*sizeof(RefNumberType), hipMemcpyDeviceToHost ); std::vector<RefNumberType> ret; ret.assign(tmpData, tmpData + n); delete[] tmpData; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ hipFree( dev_data ); hipFree( dev_row_ptr ); hipFree( dev_col_idx ); hipFree( dev_MaskLine ); hipFree( dev_vec ); hipFree( dev_vecOld ); hipFree( dev_partial_tmpErr ); hipFree( dev_partialSum ); return ret; } //------------------------------------------------------------------ // THIS FUNCION IS ONLY USED FOR TESTABILITY WHILE DEVELOPMENT 
//------------------------------------------------------------------ RefNumberType TestWrapper_KERNEL_ReduceError( RefNumberType* init_vec_1, RefNumberType* init_vec_2, int n, int THREADS) { int BlocksPerGrid = (n+THREADS-1)/THREADS; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_vec; hipMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; hipMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; hipMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); //------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ hipMemcpy( dev_vec, init_vec_1, n*sizeof(RefNumberType), hipMemcpyHostToDevice ); hipMemcpy( dev_vecOld, init_vec_2, n*sizeof(RefNumberType), hipMemcpyHostToDevice ); // GPU_Reduce_Error<<<1,1>>>( dev_tmpErr, dev_vecOld, dev_vec, n); assert( THREADS == PowerOfTwoAlign( THREADS ) ); hipLaunchKernelGGL(( GPU_Reduce_Error_2), dim3(BlocksPerGrid), dim3(THREADS) , 0, 0, dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType tmpErr = 0; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; hipMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), hipMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ hipFree( dev_vec ); hipFree( dev_vecOld ); hipFree( dev_partial_tmpErr ); return tmpErr; }
d76d3da032228a535442ae252875b2a98cf8f875.cu
#include <cmath> #include <cassert> #include <chrono> #include "kernels.h" #include "IO.hpp" #include "Show.hpp" ///////////////////////////////////////////////////////////////////////////// // GPU CODE ///////////////////////////////////////////////////////////////////////////// // texture<RefNumberType> texVec; // texture<RefNumberType> texA; inline unsigned PowerOfTwoAlign( unsigned int n ) { int PowerOfTwoAlign = 1; while( PowerOfTwoAlign < n ) PowerOfTwoAlign = PowerOfTwoAlign << 1; return PowerOfTwoAlign; } //*/ // KERNEL WITH BLOCKS. OK __global__ void GPU_DENSE_PR( RefNumberType* const dataA, const RefNumberType* const vecOld, RefNumberType* const vec, const double d ) { // row loop runs with blockIdx.x const double InvFactor = 1/((double) gridDim.x ); double sum = 0; for( size_t j = 0; j<gridDim.x; ++j ) { // sum += A[row][j]*pold[j]; sum += dataA[blockIdx.x*gridDim.x + j]*vecOld[j]; } // sum now contains the scalar product A[row,:] DOT p vec[blockIdx.x] = d*sum + (1-d)*InvFactor; } // KERNEL WITH THREADS OK AND FASTER __global__ void GPU_DENSE_PR_2( RefNumberType* const dataA, const RefNumberType* const vecOld, RefNumberType* const vec, const double d, int n ) { // loop runs with threads int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = 0; for( size_t j = 0; j<n; ++j ) { // sum += A[row][j]*pold[j]; sum += dataA[tid*n + j]*vecOld[j]; } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } // KERNEL WITH THREADS + TEXTURE MEMORY __global__ void GPU_DENSE_PR_3( RefNumberType* const dataA, // const RefNumberType* const vecOld, RefNumberType* const vec, const double d, int n ) { // row loop runs with blockIdx.x int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = 0; for( size_t j = 0; j<n; ++j ) { // sum += A[row][j]*pold[j]; // sum += dataA[threadIdx.x*blockDim.x + j]*vecOld[j]; // sum += dataA[tid*n + j]*tex1Dfetch(texVec, (int) j); // sum += tex1Dfetch(texA, tid*n + j)*tex1Dfetch(texVec, j); } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } // KERNEL WITH THREADS __global__ void GPU_CSR_PR( const RefNumberType* const data, const size_t* const row_ptr, const size_t* const col_idx, const RefNumberType* const vecOld, RefNumberType* const vec, const double d, int n ) { // row loop runs with blockIdx.x int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = 0; for( size_t idx = row_ptr[tid]; idx < row_ptr[tid+1]; ++idx ) { sum += data[idx]*vecOld[ col_idx[idx] ]; } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } // KERNEL WITH THREADS __global__ void GPU_CSR_PR_PartialSum( const RefNumberType* const data, const size_t* const row_ptr, const size_t* const col_idx, const RefNumberType* const vecOld, const RefNumberType* const partialSum, RefNumberType* const vec, const double d, int n ) { // row loop runs with blockIdx.x int tid = threadIdx.x + blockIdx.x*blockDim.x; const double InvFactor = 1/((double) n ); if( tid < n ) { double sum = (*partialSum); for( size_t idx = row_ptr[tid]; idx < row_ptr[tid+1]; ++idx ) { sum += data[idx]*vecOld[ col_idx[idx] ]; } // sum now contains the scalar product A[row,:] DOT p vec[tid] = d*sum + (1-d)*InvFactor; } } __global__ void GPU_Reduce_PartialSum( const RefNumberType* const vecOld, const size_t* const MaskLine, size_t 
MaskLine_size, RefNumberType* const result, RefNumberType defaultValue ) { (*result) = 0; for( size_t i = 0; i<MaskLine_size; ++i) { (*result) += vecOld[ MaskLine[i] ]; } (*result) *= defaultValue; } __global__ void GPU_Reduce_Error( RefNumberType* const tmpErr, RefNumberType* const vecOld, const RefNumberType* const vec, int n ) { (*tmpErr) = 0; for( size_t row = 0; row < n; ++row) { // on the fly compute norm( pold - pnext, 2) i.e. L2-norm between the current and last iteration (*tmpErr) += (vec[row]-vecOld[row])*(vec[row]-vecOld[row]); } } const int THREADS_PER_BLOCK = 1024; static_assert( THREADS_PER_BLOCK == 1024 , "Power of two value required!"); // int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; __global__ void GPU_Reduce_Error_2( RefNumberType* const partial_tmpErr, const RefNumberType* const vecOld, const RefNumberType* const vec, int n ) { //assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); __shared__ RefNumberType cache[THREADS_PER_BLOCK]; int tid = threadIdx.x + blockIdx.x * blockDim.x; float tmp = 0; while( tid < n ) { tmp += (vec[tid]-vecOld[tid])*(vec[tid]-vecOld[tid]); tid += blockDim.x * gridDim.x; } cache[threadIdx.x] = tmp; __syncthreads(); int i = blockDim.x / 2; while( i!= 0) { if( threadIdx.x < i ) { cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i/=2; } if( threadIdx.x == 0) { //(*tmpErr) = cache[0]; partial_tmpErr[ blockIdx.x ] = cache[0]; } } void showVec( RefNumberType* devPtr, int n ) { RefNumberType* tmpData = new RefNumberType[n]; cudaMemcpy( tmpData, devPtr, n*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); for( int i = 0; i < n; ++ i ) { printf("\t%.10e", (RefNumberType) tmpData[i]); } delete[] tmpData; printf("\n\n"); } template<typename T> void showVecT( T* devPtr, int n ) { T* tmpData = new T[n]; cudaMemcpy( tmpData, devPtr, n*sizeof(T), cudaMemcpyDeviceToHost ); for( int i = 0; i < n; ++ i ) { printf("\t%.10e", (T) tmpData[i]); } delete[] tmpData; printf("\n\n"); } template<typename T> void showVecTu( T* devPtr, int n ) { T* tmpData = new T[n]; cudaMemcpy( tmpData, devPtr, n*sizeof(T), cudaMemcpyDeviceToHost ); for( int i = 0; i < n; ++ i ) { printf("\t%u", (T) tmpData[i]); } delete[] tmpData; printf("\n\n"); } std::vector<RefNumberType> GPU_PageRank_Dense( std::vector<std::vector<RefNumberType> > const &A, RefNumberType d, RefNumberType eps ) { printf("GPU - CODE, n = %u \n", A.size() ); //squared matrix required (n x n) assert( A.size() == A[0].size() ); unsigned k = 0; size_t n = A.size(); double InvFactor = 1/((double) n); RefNumberType tmpErr = 2*eps; int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_dataA; cudaMalloc( (void**) &dev_dataA, n*n*sizeof(RefNumberType) ); RefNumberType *dev_vec; cudaMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; cudaMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; cudaMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); // TEXTURE - MEMORY // cudaBindTexture( NULL, texVec, dev_vecOld, n*sizeof(RefNumberType) ); // cudaBindTexture( NULL, texA, dev_dataA, n*n*sizeof(RefNumberType) ); // cudaEvent_t start, stop; // cudaEventCreate( &start ); // cudaEventCreate( &stop ); //------------------------------------------------------------------ // GPU UPLOAD 
//------------------------------------------------------------------ RefNumberType* dataA = new RefNumberType[n*n]; MatrixToFlatData( dataA, A ); cudaMemcpy( dev_dataA, dataA, n*n*sizeof(RefNumberType), cudaMemcpyHostToDevice ); delete[] dataA; // uniform vecotor of length n, with values 1/n at all positions RefNumberType* init_vec_data = new RefNumberType[n]; for( int i = 0; i < n; ++ i ) { init_vec_data[i] = InvFactor; } cudaMemcpy( dev_vec, init_vec_data, n*sizeof(RefNumberType), cudaMemcpyHostToDevice ); delete[] init_vec_data; //float time_kernel = 0; //float time_kernel2 = 0; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; while( tmpErr > eps ) { // compute p = dAp + (1-d).*1/n.*[1 1 ... 1]; // O(n^2): dense matrix-vector multiplication // printf("BEFORE\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); cudaMemcpy( dev_vecOld, dev_vec, n*sizeof(RefNumberType), cudaMemcpyDeviceToDevice ); // printf("after copy\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // cudaEventRecord( start, 0 ); // GPU_DENSE_PR<<<n,1>>>( dev_dataA, dev_vecOld, dev_vec, d ); GPU_DENSE_PR_2<<< (n+31)/32, 32>>>( dev_dataA, dev_vecOld, dev_vec, d, n ); // GPU_DENSE_PR_3<<< (n+31)/32, 32>>>( dev_dataA, /*dev_vecOld,*/ dev_vec, d, n ); // cudaEventRecord( stop, 0 ); // cudaEventSynchronize( stop ); // float deltatime; // cudaEventElapsedTime( &deltatime, start, stop ); // time_kernel += deltatime; // printf("AFTER\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // cudaEventRecord( start, 0 ); // printf("n = %i\n", n ); // assert( n == THREADS_PER_BLOCK); // GPU_Reduce_Error<<<1,1>>>( dev_tmpErr, dev_vecOld, dev_vec, n); assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); GPU_Reduce_Error_2<<< BlocksPerGrid, THREADS_PER_BLOCK >>>( dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD AND COMPLETE ERROR REDUCTION //------------------------------------------------------------------ tmpErr = 0; cudaMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } // cudaEventRecord( stop, 0 ); // cudaEventSynchronize( stop ); // cudaEventElapsedTime( &deltatime, start, stop ); // time_kernel2 += deltatime; tmpErr = sqrt( tmpErr ); printf("[k = %u]: %e\n", k++, tmpErr ); // if( k > 1000 ) break; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType* tmpData = new RefNumberType[n]; cudaMemcpy( tmpData, dev_vec, n*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); std::vector<RefNumberType> ret; ret.assign(tmpData, tmpData + n); delete[] tmpData; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ // cudaEventDestroy( start ); // cudaEventDestroy( stop ); // cudaUnbindTexture( texVec ); // cudaUnbindTexture( texA ); cudaFree( dev_dataA ); cudaFree( dev_vec ); cudaFree( dev_vecOld ); cudaFree( dev_partial_tmpErr ); return ret; } std::vector<RefNumberType> GPU_PageRank_CSR( CSRType<RefNumberType> const &S, size_t n, RefNumberType d, RefNumberType eps ) { printf("GPU - CODE, nZZ = %u \n", S.data.size() ); unsigned k = 0; double InvFactor = 1/((double) n); RefNumberType tmpErr = 2*eps; int BlocksPerGrid = 
(n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_data; cudaMalloc( (void**) &dev_data, S.data.size()*sizeof(RefNumberType) ); size_t *dev_row_ptr; cudaMalloc( (void**) &dev_row_ptr, S.row_ptr.size()*sizeof(size_t) ); size_t *dev_col_idx; cudaMalloc( (void**) &dev_col_idx, S.col_idx.size()*sizeof(size_t) ); RefNumberType *dev_vec; cudaMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; cudaMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; cudaMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); // TEXTURE - MEMORY // cudaBindTexture( NULL, texVec, dev_vecOld, n*sizeof(RefNumberType) ); // cudaBindTexture( NULL, texA, dev_dataA, n*n*sizeof(RefNumberType) ); //------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ RefNumberType* tmp1 = new RefNumberType[ S.data.size() ]; std::copy( S.data.begin(), S.data.end(), tmp1 ); cudaMemcpy( dev_data, tmp1, S.data.size()*sizeof(RefNumberType), cudaMemcpyHostToDevice ); delete[] tmp1; size_t* tmp2 = new size_t[ S.row_ptr.size() ]; std::copy( S.row_ptr.begin(), S.row_ptr.end(), tmp2 ); cudaMemcpy( dev_row_ptr, tmp2, S.row_ptr.size()*sizeof(size_t), cudaMemcpyHostToDevice ); delete[] tmp2; size_t* tmp3 = new size_t[ S.col_idx.size() ]; std::copy( S.col_idx.begin(), S.col_idx.end(), tmp3 ); cudaMemcpy( dev_col_idx, tmp3, S.col_idx.size()*sizeof(size_t), cudaMemcpyHostToDevice ); delete[] tmp3; RefNumberType* init_vec_data = new RefNumberType[n]; for( int i = 0; i < n; ++ i ) { init_vec_data[i] = InvFactor; } cudaMemcpy( dev_vec, init_vec_data, n*sizeof(RefNumberType), cudaMemcpyHostToDevice ); delete[] init_vec_data; // float time_kernel = 0; // float time_kernel2 = 0; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; while( tmpErr > eps ) { // copy current state to old vector. 
cudaMemcpy( dev_vecOld, dev_vec, n*sizeof(RefNumberType), cudaMemcpyDeviceToDevice ); // printf("after copy\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // cudaEventRecord( start, 0 ); GPU_CSR_PR<<< (n+31)/32, 32>>>( dev_data, dev_row_ptr, dev_col_idx, dev_vecOld, dev_vec, d, n ); // cudaEventRecord( stop, 0 ); // cudaEventSynchronize( stop ); // float deltatime; // cudaEventElapsedTime( &deltatime, start, stop ); // time_kernel += deltatime; // printf("AFTER\n"); // showVec( dev_vecOld, n); // showVec( dev_vec, n); // cudaEventRecord( start, 0 ); // printf("n = %i\n", n ); // assert( n == THREADS_PER_BLOCK); // GPU_Reduce_Error<<<1,1>>>( dev_tmpErr, dev_vecOld, dev_vec, n); assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); GPU_Reduce_Error_2<<< BlocksPerGrid, THREADS_PER_BLOCK >>>( dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD AND COMPLETE ERROR REDUCTION //------------------------------------------------------------------ tmpErr = 0; cudaMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } // cudaEventRecord( stop, 0 ); // cudaEventSynchronize( stop ); // cudaEventElapsedTime( &deltatime, start, stop ); // time_kernel2 += deltatime; tmpErr = sqrt( tmpErr ); printf("[k = %u]: %e\n", k++, tmpErr ); // if( k > 1000 ) break; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType* tmpData = new RefNumberType[n]; cudaMemcpy( tmpData, dev_vec, n*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); std::vector<RefNumberType> ret; ret.assign(tmpData, tmpData + n); delete[] tmpData; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ // cudaEventDestroy( start ); // cudaEventDestroy( stop ); //cudaUnbindTexture( texVec ); //cudaUnbindTexture( texA ); cudaFree( dev_data ); cudaFree( dev_row_ptr ); cudaFree( dev_col_idx ); cudaFree( dev_vec ); cudaFree( dev_vecOld ); cudaFree( dev_partial_tmpErr ); return ret; } std::vector<RefNumberType> GPU_PageRank_CSR_OPT( CSRType<RefNumberType> const &S, size_t n, std::vector<size_t> const &MaskLine, RefNumberType defaultValue, RefNumberType d, RefNumberType eps ) { printf("GPU - CODE, nZZ = %u \n", S.data.size() ); unsigned k = 0; double InvFactor = 1/((double) n); RefNumberType tmpErr = 2*eps; int BlocksPerGrid = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_data; cudaMalloc( (void**) &dev_data, S.data.size()*sizeof(RefNumberType) ); size_t *dev_row_ptr; cudaMalloc( (void**) &dev_row_ptr, S.row_ptr.size()*sizeof(size_t) ); size_t *dev_col_idx; cudaMalloc( (void**) &dev_col_idx, S.col_idx.size()*sizeof(size_t) ); size_t *dev_MaskLine; cudaMalloc( (void**) &dev_MaskLine, MaskLine.size()*sizeof(size_t) ); RefNumberType *dev_vec; cudaMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; cudaMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; cudaMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); RefNumberType *dev_partialSum; 
cudaMalloc( (void**) &dev_partialSum, sizeof(RefNumberType) ); // TEXTURE - MEMORY // cudaBindTexture( NULL, texVec, dev_vecOld, n*sizeof(RefNumberType) ); // cudaBindTexture( NULL, texA, dev_dataA, n*n*sizeof(RefNumberType) ); //------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ RefNumberType* tmp1 = new RefNumberType[ S.data.size() ]; std::copy( S.data.begin(), S.data.end(), tmp1 ); cudaMemcpy( dev_data, tmp1, S.data.size()*sizeof(RefNumberType), cudaMemcpyHostToDevice ); delete[] tmp1; size_t* tmp2 = new size_t[ S.row_ptr.size() ]; std::copy( S.row_ptr.begin(), S.row_ptr.end(), tmp2 ); cudaMemcpy( dev_row_ptr, tmp2, S.row_ptr.size()*sizeof(size_t), cudaMemcpyHostToDevice ); delete[] tmp2; size_t* tmp3 = new size_t[ S.col_idx.size() ]; std::copy( S.col_idx.begin(), S.col_idx.end(), tmp3 ); cudaMemcpy( dev_col_idx, tmp3, S.col_idx.size()*sizeof(size_t), cudaMemcpyHostToDevice ); delete[] tmp3; size_t* tmp4 = new size_t[ S.col_idx.size() ]; std::copy( MaskLine.begin(), MaskLine.end(), tmp4 ); cudaMemcpy( dev_MaskLine, tmp4, MaskLine.size()*sizeof(size_t), cudaMemcpyHostToDevice ); delete[] tmp4; RefNumberType* init_vec_data = new RefNumberType[n]; for( int i = 0; i < n; ++ i ) { init_vec_data[i] = InvFactor; } cudaMemcpy( dev_vec, init_vec_data, n*sizeof(RefNumberType), cudaMemcpyHostToDevice ); delete[] init_vec_data; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; while( tmpErr > eps ) { // copy current state to old vector. cudaMemcpy( dev_vecOld, dev_vec, n*sizeof(RefNumberType), cudaMemcpyDeviceToDevice ); GPU_Reduce_PartialSum<<<1,1>>>( dev_vecOld, dev_MaskLine, MaskLine.size(), dev_partialSum, defaultValue ); GPU_CSR_PR_PartialSum<<< (n+31)/32, 32>>>( dev_data, dev_row_ptr, dev_col_idx, dev_vecOld, dev_partialSum, dev_vec, d, n ); assert( THREADS_PER_BLOCK == PowerOfTwoAlign( THREADS_PER_BLOCK ) ); GPU_Reduce_Error_2<<< BlocksPerGrid, THREADS_PER_BLOCK >>>( dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD AND COMPLETE ERROR REDUCTION //------------------------------------------------------------------ tmpErr = 0; cudaMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } tmpErr = sqrt( tmpErr ); printf("[k = %u]: %e\n", k++, tmpErr ); // if( k > 1000 ) break; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType* tmpData = new RefNumberType[n]; cudaMemcpy( tmpData, dev_vec, n*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); std::vector<RefNumberType> ret; ret.assign(tmpData, tmpData + n); delete[] tmpData; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ cudaFree( dev_data ); cudaFree( dev_row_ptr ); cudaFree( dev_col_idx ); cudaFree( dev_MaskLine ); cudaFree( dev_vec ); cudaFree( dev_vecOld ); cudaFree( dev_partial_tmpErr ); cudaFree( dev_partialSum ); return ret; } //------------------------------------------------------------------ // THIS FUNCION IS ONLY USED FOR TESTABILITY WHILE DEVELOPMENT //------------------------------------------------------------------ RefNumberType TestWrapper_KERNEL_ReduceError( RefNumberType* 
init_vec_1, RefNumberType* init_vec_2, int n, int THREADS) { int BlocksPerGrid = (n+THREADS-1)/THREADS; //------------------------------------------------------------------ // GPU MEMORY //------------------------------------------------------------------ RefNumberType *dev_vec; cudaMalloc( (void**) &dev_vec, n*sizeof(RefNumberType) ); RefNumberType *dev_vecOld; cudaMalloc( (void**) &dev_vecOld, n*sizeof(RefNumberType) ); RefNumberType *dev_partial_tmpErr; cudaMalloc( (void**) &dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType) ); //------------------------------------------------------------------ // GPU UPLOAD //------------------------------------------------------------------ cudaMemcpy( dev_vec, init_vec_1, n*sizeof(RefNumberType), cudaMemcpyHostToDevice ); cudaMemcpy( dev_vecOld, init_vec_2, n*sizeof(RefNumberType), cudaMemcpyHostToDevice ); // GPU_Reduce_Error<<<1,1>>>( dev_tmpErr, dev_vecOld, dev_vec, n); assert( THREADS == PowerOfTwoAlign( THREADS ) ); GPU_Reduce_Error_2<<< BlocksPerGrid, THREADS >>>( dev_partial_tmpErr, dev_vecOld, dev_vec, n); //------------------------------------------------------------------ // GPU DOWNLOAD //------------------------------------------------------------------ RefNumberType tmpErr = 0; RefNumberType* partial_tmpErr = new RefNumberType[BlocksPerGrid]; cudaMemcpy( partial_tmpErr, dev_partial_tmpErr, BlocksPerGrid*sizeof(RefNumberType), cudaMemcpyDeviceToHost ); for( unsigned i = 0; i<BlocksPerGrid; ++i) { tmpErr += partial_tmpErr[i]; } delete[] partial_tmpErr; //------------------------------------------------------------------ // GPU CLEANUP //------------------------------------------------------------------ cudaFree( dev_vec ); cudaFree( dev_vecOld ); cudaFree( dev_partial_tmpErr ); return tmpErr; }
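The fragment above ends each power-iteration sweep with a two-stage convergence check: GPU_Reduce_Error_2 writes one partial sum of squared differences per block, and the host adds the partials and takes the square root. The sketch below illustrates that same pattern; it is not taken from the file above, the kernel and function names are hypothetical, double stands in for RefNumberType, and the block size is assumed to be a power of two (mirroring the PowerOfTwoAlign assert).

// Illustrative sketch of a block-partial "sum of squared differences" reduction
// finished on the host, as in the convergence loop above. Hypothetical names.
#include <cuda_runtime.h>
#include <cmath>

template<int THREADS>
__global__ void partial_sq_diff(double* partial, const double* a, const double* b, int n)
{
    __shared__ double cache[THREADS];                 // one slot per thread of the block
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    double d = 0.0;
    if (tid < n) { double e = a[tid] - b[tid]; d = e * e; }
    cache[threadIdx.x] = d;
    __syncthreads();
    for (int s = THREADS / 2; s > 0; s >>= 1) {       // power-of-two tree reduction
        if (threadIdx.x < s) cache[threadIdx.x] += cache[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) partial[blockIdx.x] = cache[0];   // one partial per block
}

// Host side: add the per-block partials and take the square root,
// exactly as the while-loop above does with partial_tmpErr.
double finish_error(const double* dev_partial, int blocksPerGrid)
{
    double err = 0.0;
    double* h = new double[blocksPerGrid];
    cudaMemcpy(h, dev_partial, blocksPerGrid * sizeof(double), cudaMemcpyDeviceToHost);
    for (int i = 0; i < blocksPerGrid; ++i) err += h[i];
    delete[] h;
    return sqrt(err);
}
// Launch as: partial_sq_diff<THREADS><<<blocksPerGrid, THREADS>>>(dev_partial, dev_old, dev_new, n);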
4ace7d4813faeacee7a6cad2586e23648fd37069.hip
// !!! This is a file automatically generated by hipify!!! /* * Face Factor Distance * (MP3, Fall 2019, GPU Programming/Yifan Liu) */ #include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> #include <ctime> #include <string> #include <sstream> /* Usage message displayed when invalid command line arguments are supplied */ #define USAGE \ "MP3 takes a (m x k) matrix M \n" \ "and compute the distance betwen rows and save teh result if two rows' distance is smaller than 0.3\n" \ "The values of m, k must be >= 1.\n" \ "\n" \ "Usage: mp3 m k\n" /* Tile size*/ #ifndef TILE_WIDTH # define TILE_WIDTH 16 #endif /* If a CUDA call fails, display an error message and exit */ #define CUDA_CHECK(e) { \ hipError_t err = (e); \ if (err != hipSuccess) \ { \ fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \ __FILE__, __LINE__, #e, hipGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } /* assert() is only supported on devices of compute capability >= 2.0 */ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) # undef assert # define assert(arg) #endif /*getDistance calculate the distance among rows*/ __global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k) { assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col= blockIdx.x * TILE_WIDTH + threadIdx.x; if(row >= m || col >= m) return; if(row == col){ d_P[row*m+col] = 100; return; } float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(d_M[row*k+i] - d_M[col*k+i], 2); } expected = sqrt(expected); d_P[row*m+col] = expected; } /* Displays one row of the given matrix */ static void printRow(int row, float *matrix, int cols) { printf("["); if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]); if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]); if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]); if (cols >= 6) printf(" ..."); if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]); if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]); printf(" ]\n"); } /* Displays the given matrix */ static void printMatrix(float *matrix, int rows, int cols) { if (rows >= 1) printRow(0, matrix, cols); if (rows >= 2) printRow(1, matrix, cols); if (rows >= 3) printRow(2, matrix, cols); if (rows >= 6) printf(" ...\n"); if (rows >= 5) printRow(rows-2, matrix, cols); if (rows >= 4) printRow(rows-1, matrix, cols); } /* Program entrypoint. 
*/ int main() { /* read in m and k here */ std::cout << "Loading matrices...\n"; clock_t begin = clock(); int m, k; std::ifstream in1; in1.open("descriptor.txt"); if(in1.is_open()) printf("File opened successfully\n"); else printf("File opened unsuccessfully\n"); std::string line, temp; // read in m and k while ((std::getline(in1, line))){ if (line == "end header") break; std::istringstream ss(line); std::cout << line << std::endl; if(line.find("line_number")!=-1) ss >> temp >> m; else if(line.find("vector_dimension")!=-1) ss >> temp >> k; } printf("The matrix is %d x %d\n", m, k); if (m < 1 || k < 1) { fprintf(stderr, USAGE); fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k); system("Pause"); return EXIT_FAILURE; } size_t bytesForM = m * k * sizeof(float); float *h_M = (float *)malloc(bytesForM); /* Fill M (on host) */ for (int i = 0; i < m*k; ++i) in1 >> h_M[i]; in1.close(); clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Reading input file took %f seconds\n", elapsed_secs); printf("M =\n"); printMatrix(h_M, m, k); printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH); /********************************************/ /* M is (m x k), P is (m x m) */ /********************************************/ size_t bytesForP = m * m * sizeof(float); float *h_P = (float *)malloc(bytesForP); if (h_M == NULL || h_P == NULL) { fprintf(stderr, "Unable to allocate host memory\n"); system("Pause"); return EXIT_FAILURE; } /* Allocate device memory for matrices */ float *d_M, *d_P; CUDA_CHECK(hipMalloc((void **)&d_M, bytesForM)); CUDA_CHECK(hipMalloc((void **)&d_P, bytesForP)); /* Copy M to device global memory */ CUDA_CHECK(hipMemcpy(d_M, h_M, bytesForM, hipMemcpyHostToDevice)); /* Launch the CUDA kernel */ dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); printf("matMul called from host\n"); hipLaunchKernelGGL(( getDistance_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, d_M, d_P, m, k); CUDA_CHECK(hipDeviceSynchronize()); /* Copy result matrix from device global memory back to host memory */ CUDA_CHECK(hipMemcpy(h_P, d_P, bytesForP, hipMemcpyDeviceToHost)); printf(" product received from host\n"); printf("P =\n"); printMatrix(h_P, m, m); printf("Saving result\n"); std::ofstream out; out.open("matrix.txt"); for (int i = 0; i < m; i++){ for (int j = 0; j < m; j++){ if (h_P[i*m+j] < 0.3) out << j+1 << " "; } out << std::endl; } out.close(); /* Free device global memory */ CUDA_CHECK(hipFree(d_M)); CUDA_CHECK(hipFree(d_P)); /* Free host memory */ free(h_M); free(h_P); /* Reset the device (unnecessary if not profiling, but good practice) */ CUDA_CHECK(hipDeviceReset()); printf("Done\n"); system("Pause"); return EXIT_SUCCESS; }
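getDistance_gpu in the file above fills the (m x m) matrix P with the Euclidean distance between every pair of rows of the (m x k) matrix M, forcing 100 onto the diagonal so a row never matches itself. A small CPU reference such as the following is a convenient way to spot-check h_P for modest m and k; it is a hypothetical helper, not part of the original program.

// Hypothetical CPU reference for getDistance_gpu, useful for verifying h_P.
// Mirrors the kernel: diagonal entries are 100, off-diagonal entries are the
// Euclidean distance between rows i and j of M.
#include <cmath>

static void getDistance_cpu(const float* M, float* P, int m, int k)
{
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < m; ++j) {
            if (i == j) { P[i * m + j] = 100.0f; continue; }
            float acc = 0.0f;
            for (int t = 0; t < k; ++t) {
                float d = M[i * k + t] - M[j * k + t];
                acc += d * d;   // same value as the kernel's pow(d, 2), without the libcall
            }
            P[i * m + j] = sqrtf(acc);
        }
    }
}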
4ace7d4813faeacee7a6cad2586e23648fd37069.cu
/* * Face Factor Distance * (MP3, Fall 2019, GPU Programming/Yifan Liu) */ #include <assert.h> #include <cuda.h> #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> #include <ctime> #include <string> #include <sstream> /* Usage message displayed when invalid command line arguments are supplied */ #define USAGE \ "MP3 takes a (m x k) matrix M \n" \ "and compute the distance betwen rows and save teh result if two rows' distance is smaller than 0.3\n" \ "The values of m, k must be >= 1.\n" \ "\n" \ "Usage: mp3 m k\n" /* Tile size*/ #ifndef TILE_WIDTH # define TILE_WIDTH 16 #endif /* If a CUDA call fails, display an error message and exit */ #define CUDA_CHECK(e) { \ cudaError_t err = (e); \ if (err != cudaSuccess) \ { \ fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \ __FILE__, __LINE__, #e, cudaGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } /* assert() is only supported on devices of compute capability >= 2.0 */ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) # undef assert # define assert(arg) #endif /*getDistance calculate the distance among rows*/ __global__ static void getDistance_gpu(float *d_M, float *d_P, int m, int k) { assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH); int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col= blockIdx.x * TILE_WIDTH + threadIdx.x; if(row >= m || col >= m) return; if(row == col){ d_P[row*m+col] = 100; return; } float expected = 0.0; for (int i = 0; i < k; i++) { expected += pow(d_M[row*k+i] - d_M[col*k+i], 2); } expected = sqrt(expected); d_P[row*m+col] = expected; } /* Displays one row of the given matrix */ static void printRow(int row, float *matrix, int cols) { printf("["); if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]); if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]); if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]); if (cols >= 6) printf(" ..."); if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]); if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]); printf(" ]\n"); } /* Displays the given matrix */ static void printMatrix(float *matrix, int rows, int cols) { if (rows >= 1) printRow(0, matrix, cols); if (rows >= 2) printRow(1, matrix, cols); if (rows >= 3) printRow(2, matrix, cols); if (rows >= 6) printf(" ...\n"); if (rows >= 5) printRow(rows-2, matrix, cols); if (rows >= 4) printRow(rows-1, matrix, cols); } /* Program entrypoint. 
*/ int main() { /* read in m and k here */ std::cout << "Loading matrices...\n"; clock_t begin = clock(); int m, k; std::ifstream in1; in1.open("descriptor.txt"); if(in1.is_open()) printf("File opened successfully\n"); else printf("File opened unsuccessfully\n"); std::string line, temp; // read in m and k while ((std::getline(in1, line))){ if (line == "end header") break; std::istringstream ss(line); std::cout << line << std::endl; if(line.find("line_number")!=-1) ss >> temp >> m; else if(line.find("vector_dimension")!=-1) ss >> temp >> k; } printf("The matrix is %d x %d\n", m, k); if (m < 1 || k < 1) { fprintf(stderr, USAGE); fprintf(stderr, "Invalid value for m or k (%d, %d)\n", m, k); system("Pause"); return EXIT_FAILURE; } size_t bytesForM = m * k * sizeof(float); float *h_M = (float *)malloc(bytesForM); /* Fill M (on host) */ for (int i = 0; i < m*k; ++i) in1 >> h_M[i]; in1.close(); clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("Reading input file took %f seconds\n", elapsed_secs); printf("M =\n"); printMatrix(h_M, m, k); printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH); /********************************************/ /* M is (m x k), P is (m x m) */ /********************************************/ size_t bytesForP = m * m * sizeof(float); float *h_P = (float *)malloc(bytesForP); if (h_M == NULL || h_P == NULL) { fprintf(stderr, "Unable to allocate host memory\n"); system("Pause"); return EXIT_FAILURE; } /* Allocate device memory for matrices */ float *d_M, *d_P; CUDA_CHECK(cudaMalloc((void **)&d_M, bytesForM)); CUDA_CHECK(cudaMalloc((void **)&d_P, bytesForP)); /* Copy M to device global memory */ CUDA_CHECK(cudaMemcpy(d_M, h_M, bytesForM, cudaMemcpyHostToDevice)); /* Launch the CUDA kernel */ dim3 dimGrid((m+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); printf("matMul called from host\n"); getDistance_gpu<<<dimGrid, dimBlock>>>(d_M, d_P, m, k); CUDA_CHECK(cudaDeviceSynchronize()); /* Copy result matrix from device global memory back to host memory */ CUDA_CHECK(cudaMemcpy(h_P, d_P, bytesForP, cudaMemcpyDeviceToHost)); printf(" product received from host\n"); printf("P =\n"); printMatrix(h_P, m, m); printf("Saving result\n"); std::ofstream out; out.open("matrix.txt"); for (int i = 0; i < m; i++){ for (int j = 0; j < m; j++){ if (h_P[i*m+j] < 0.3) out << j+1 << " "; } out << std::endl; } out.close(); /* Free device global memory */ CUDA_CHECK(cudaFree(d_M)); CUDA_CHECK(cudaFree(d_P)); /* Free host memory */ free(h_M); free(h_P); /* Reset the device (unnecessary if not profiling, but good practice) */ CUDA_CHECK(cudaDeviceReset()); printf("Done\n"); system("Pause"); return EXIT_SUCCESS; }
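Comparing this .cu file with its .hip counterpart above, the kernel and program logic are identical; hipify's edits are confined to the header and runtime API names (hipMalloc, hipMemcpy, hipFree, hipDeviceSynchronize, hipDeviceReset and the hipError_t checks) and to the kernel launch syntax. The sketch below contrasts the two launch forms with a hypothetical dummy_kernel; it compiles as CUDA, with the HIP form kept in a comment since one translation unit cannot target both runtimes.

// Minimal sketch of the two equivalent launch forms seen in this file pair.
// dummy_kernel is hypothetical; the real pair launches getDistance_gpu.
#include <cuda_runtime.h>

__global__ void dummy_kernel(int* out) { if (threadIdx.x == 0) *out = 1; }

int main()
{
    int* d_out = nullptr;
    cudaMalloc((void**)&d_out, sizeof(int));

    dim3 dimGrid(1), dimBlock(32);

    // CUDA triple-chevron launch, as in the .cu version:
    dummy_kernel<<<dimGrid, dimBlock>>>(d_out);

    // HIP launch emitted by hipify, as in the .hip version (comment only here):
    //   hipLaunchKernelGGL((dummy_kernel), dimGrid, dimBlock, 0, 0, d_out);
    // The two extra arguments are dynamic shared-memory bytes and the stream.

    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}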
26698b061b198e202238692db55988619a8665f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019-2020, NVIDIA CORPORATION. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_cooperative_groups.h> #if ( __CUDACC_VER_MAJOR__ > 10 ) #include <cooperative_groups/reduce.h> #endif namespace cg = cooperative_groups; // Check if C++17 is being used #if __cplusplus >= 201703L #include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 8x8 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 8, int WARPSIZE = 32> __device__ void _cupy_channelizer_8x8( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile_32 { cg::tiled_partition<WARPSIZE>( block ) }; const auto tile { cg::tiled_partition<M>( tile_32 ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( 
cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[8][8]; _cupy_channelizer_8x8<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[8][8]; _cupy_channelizer_8x8<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[8][8]; _cupy_channelizer_8x8<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[8][8]; _cupy_channelizer_8x8<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 16x16 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 16, int WARPSIZE = 32> __device__ void _cupy_channelizer_16x16( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile_32 { cg::tiled_partition<WARPSIZE>( block ) }; const auto tile { cg::tiled_partition<M>( tile_32 ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps 
+ 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[16][16]; _cupy_channelizer_16x16<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[16][16]; _cupy_channelizer_16x16<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[16][16]; _cupy_channelizer_16x16<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[16][16]; _cupy_channelizer_16x16<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 32x32 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 32, int WARPSIZE = 32> __device__ void _cupy_channelizer_32x32( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile { cg::tiled_partition<WARPSIZE>( block ) }; 
const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[32][32]; _cupy_channelizer_32x32<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[32][32]; _cupy_channelizer_32x32<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[32][32]; _cupy_channelizer_32x32<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) 
_cupy_channelizer_32x32_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[32][32]; _cupy_channelizer_32x32<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } #else // C++11 being used /////////////////////////////////////////////////////////////////////////////// // CUDA 10.1/10.2 // /////////////////////////////////////////////////////////////////////////////// #include <hip/hip_complex.h> template<typename T, int M> __device__ T reduce_sum_tile_shfl( cg::thread_block_tile<M> g, T val ) { // Each iteration halves the number of active threads // Each thread adds its partial sum[i] to sum[lane+i] for ( int i = g.size( ) / 2; i > 0; i /= 2 ) { val += g.shfl_down( val, i ); } return val; // note: only thread 0 will return full sum } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER F/CF // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y, float s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = h[ty * n_chans + btx]; } else { s_mem[tx][ty] = 0.0f; } block.sync( ); float local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } else { s_mem[tx][ty] = 0.0f; } } block.sync( ); float local_reg { s_mem[ty][tx] }; float temp {}; cuFloatComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; vv.x = reduce_sum_tile_shfl<float, M>( tile, temp ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[8][8]; _cupy_channelizer_float32_complex64<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[16][16]; _cupy_channelizer_float32_complex64<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, 
cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[32][32]; _cupy_channelizer_float32_complex64<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER CF/CF // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y, cuFloatComplex s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = cuConjf( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = make_cuFloatComplex( 0.0f, 0.0f ); } block.sync( ); cuFloatComplex local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = cuConjf( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = cuConjf( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][ty] = make_cuFloatComplex( 0.0f, 0.0f ); } } block.sync( ); cuFloatComplex local_reg { s_mem[ty][tx] }; cuFloatComplex temp {}; cuFloatComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = cuCmulf( local_h, local_reg ); vv.x = reduce_sum_tile_shfl<float, M>( tile, temp.x ); vv.y = reduce_sum_tile_shfl<float, M>( tile, temp.y ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[8][8]; _cupy_channelizer_complex64_complex64<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[16][16]; _cupy_channelizer_complex64_complex64<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[32][32]; _cupy_channelizer_complex64_complex64<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER D/CD // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, hipDoubleComplex 
*__restrict__ y, double s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = h[ty * n_chans + btx]; } else { s_mem[tx][ty] = 0.0; } block.sync( ); double local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } else { s_mem[tx][ty] = 0.0; } } block.sync( ); double local_reg { s_mem[ty][tx] }; double temp {}; hipDoubleComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; vv.x = reduce_sum_tile_shfl<double, M>( tile, temp ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, hipDoubleComplex *__restrict__ y ) { __shared__ double s_mem[8][8]; _cupy_channelizer_float64_complex128<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, hipDoubleComplex *__restrict__ y ) { __shared__ double s_mem[16][16]; _cupy_channelizer_float64_complex128<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, hipDoubleComplex *__restrict__ y ) { __shared__ double s_mem[32][32]; _cupy_channelizer_float64_complex128<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER CD/CD // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const hipDoubleComplex *__restrict__ x, const hipDoubleComplex *__restrict__ h, hipDoubleComplex *__restrict__ y, hipDoubleComplex s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = cuConj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = make_cuDoubleComplex( 0.0, 0.0 ); } block.sync( ); hipDoubleComplex local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = cuConj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + 
( n_chans - 1 - btx )] ); } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = cuConj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][ty] = make_cuDoubleComplex( 0.0, 0.0 ); } } block.sync( ); hipDoubleComplex local_reg { s_mem[ty][tx] }; hipDoubleComplex temp {}; hipDoubleComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = cuCmul( local_h, local_reg ); vv.x = reduce_sum_tile_shfl<double, M>( tile, temp.x ); vv.y = reduce_sum_tile_shfl<double, M>( tile, temp.y ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const hipDoubleComplex *__restrict__ x, const hipDoubleComplex *__restrict__ h, hipDoubleComplex *__restrict__ y ) { __shared__ hipDoubleComplex s_mem[8][8]; _cupy_channelizer_complex128_complex128<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const hipDoubleComplex *__restrict__ x, const hipDoubleComplex *__restrict__ h, hipDoubleComplex *__restrict__ y ) { __shared__ hipDoubleComplex s_mem[16][16]; _cupy_channelizer_complex128_complex128<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const hipDoubleComplex *__restrict__ x, const hipDoubleComplex *__restrict__ h, hipDoubleComplex *__restrict__ y ) { __shared__ hipDoubleComplex s_mem[32][32]; _cupy_channelizer_complex128_complex128<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } #endif
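The channelizer file above carries two code paths: with C++17 the per-tile sum uses cg::reduce from <cooperative_groups/reduce.h>, while the C++11 fallback hand-rolls the same reduction with shfl_down (reduce_sum_tile_shfl). The sketch below isolates that shuffle reduction in a self-contained kernel; the demo names are hypothetical, but the halving loop mirrors the fallback path.

// Self-contained illustration of the tile shuffle reduction used by the C++11 path.
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

template<typename T, int M>
__device__ T tile_sum(cg::thread_block_tile<M> g, T val)
{
    // Each iteration halves the number of active lanes; lane 0 ends with the tile total.
    for (int i = g.size() / 2; i > 0; i /= 2)
        val += g.shfl_down(val, i);
    return val;
}

__global__ void tile_sum_demo(float* out)
{
    auto block = cg::this_thread_block();
    auto tile  = cg::tiled_partition<32>(block);   // 32-lane tiles, as in the 32x32 kernels
    float v = 1.0f;                                // every lane contributes 1.0
    float s = tile_sum<float, 32>(tile, v);
    if (tile.thread_rank() == 0) {                 // only lane 0 holds the full sum (32.0)
        int tile_id = (blockIdx.x * blockDim.x + block.thread_rank()) / 32;
        out[tile_id] = s;
    }
}
// Launch e.g. tile_sum_demo<<<grid, 256>>>(d_out) with grid*256/32 floats in d_out.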
26698b061b198e202238692db55988619a8665f2.cu
// Copyright (c) 2019-2020, NVIDIA CORPORATION. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cooperative_groups.h> #if ( __CUDACC_VER_MAJOR__ > 10 ) #include <cooperative_groups/reduce.h> #endif namespace cg = cooperative_groups; // Check if C++17 is being used #if __cplusplus >= 201703L #include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 8x8 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 8, int WARPSIZE = 32> __device__ void _cupy_channelizer_8x8( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile_32 { cg::tiled_partition<WARPSIZE>( block ) }; const auto tile { cg::tiled_partition<M>( tile_32 ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( 
tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[8][8]; _cupy_channelizer_8x8<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[8][8]; _cupy_channelizer_8x8<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[8][8]; _cupy_channelizer_8x8<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[8][8]; _cupy_channelizer_8x8<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 16x16 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 16, int WARPSIZE = 32> __device__ void _cupy_channelizer_16x16( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile_32 { cg::tiled_partition<WARPSIZE>( block ) }; const auto tile { cg::tiled_partition<M>( tile_32 ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid 
- n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[16][16]; _cupy_channelizer_16x16<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[16][16]; _cupy_channelizer_16x16<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[16][16]; _cupy_channelizer_16x16<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[16][16]; _cupy_channelizer_16x16<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER 32x32 // /////////////////////////////////////////////////////////////////////////////// // T is input type // U is output type template<typename T, typename U, int M = 32, int WARPSIZE = 32> __device__ void _cupy_channelizer_32x32( const int n_chans, const int n_taps, const int n_pts, const T *__restrict__ x, const T *__restrict__ h, U *__restrict__ y, T s_mem[M][M] ) { const auto block { cg::this_thread_block( ) }; const auto tile { cg::tiled_partition<WARPSIZE>( block ) }; const auto btx { blockIdx.x * blockDim.x + threadIdx.x }; const auto tx { threadIdx.x }; const auto ty { 
threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = thrust::conj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = h[ty * n_chans + btx]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } block.sync( ); T local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][( n_taps - 1 ) - ty] = thrust::conj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } } else { if ( btx < n_chans && ty <= bid ) { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][bid - ty] = thrust::conj( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } } else { if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { s_mem[tx][ty] = T( 0.0, 0.0 ); } else { s_mem[tx][ty] = 0.0; } } } block.sync( ); T local_reg { s_mem[ty][tx] }; T temp {}; U vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; if constexpr ( std::is_same_v<T, thrust::complex<float>> || std::is_same_v<T, thrust::complex<double>> ) { vv.real( cg::reduce( tile, temp.real( ), cg::plus<typename U::value_type>( ) ) ); vv.imag( cg::reduce( tile, temp.imag( ), cg::plus<typename U::value_type>( ) ) ); } else { vv.real( cg::reduce( tile, temp, cg::plus<typename U::value_type>( ) ) ); } } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ float s_mem[32][32]; _cupy_channelizer_32x32<float, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const thrust::complex<float> *__restrict__ x, const thrust::complex<float> *__restrict__ h, thrust::complex<float> *__restrict__ y ) { __shared__ thrust::complex<float> s_mem[32][32]; _cupy_channelizer_32x32<thrust::complex<float>, thrust::complex<float>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ double s_mem[32][32]; _cupy_channelizer_32x32<double, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const 
thrust::complex<double> *__restrict__ x, const thrust::complex<double> *__restrict__ h, thrust::complex<double> *__restrict__ y ) { __shared__ thrust::complex<double> s_mem[32][32]; _cupy_channelizer_32x32<thrust::complex<double>, thrust::complex<double>>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } #else // C++11 being used /////////////////////////////////////////////////////////////////////////////// // CUDA 10.1/10.2 // /////////////////////////////////////////////////////////////////////////////// #include <cuComplex.h> template<typename T, int M> __device__ T reduce_sum_tile_shfl( cg::thread_block_tile<M> g, T val ) { // Each iteration halves the number of active threads // Each thread adds its partial sum[i] to sum[lane+i] for ( int i = g.size( ) / 2; i > 0; i /= 2 ) { val += g.shfl_down( val, i ); } return val; // note: only thread 0 will return full sum } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER F/CF // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y, float s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = h[ty * n_chans + btx]; } else { s_mem[tx][ty] = 0.0f; } block.sync( ); float local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } else { s_mem[tx][ty] = 0.0f; } } block.sync( ); float local_reg { s_mem[ty][tx] }; float temp {}; cuFloatComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; vv.x = reduce_sum_tile_shfl<float, M>( tile, temp ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[8][8]; _cupy_channelizer_float32_complex64<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[16][16]; _cupy_channelizer_float32_complex64<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float32_complex64( const int n_chans, const int n_taps, const int n_pts, const float *__restrict__ x, const float *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ float s_mem[32][32]; _cupy_channelizer_float32_complex64<32>( n_chans, n_taps, 
n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER CF/CF // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y, cuFloatComplex s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = cuConjf( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = make_cuFloatComplex( 0.0f, 0.0f ); } block.sync( ); cuFloatComplex local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = cuConjf( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = cuConjf( x[ty * n_chans + ( n_chans - 1 - btx )] ); } else { s_mem[tx][ty] = make_cuFloatComplex( 0.0f, 0.0f ); } } block.sync( ); cuFloatComplex local_reg { s_mem[ty][tx] }; cuFloatComplex temp {}; cuFloatComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = cuCmulf( local_h, local_reg ); vv.x = reduce_sum_tile_shfl<float, M>( tile, temp.x ); vv.y = reduce_sum_tile_shfl<float, M>( tile, temp.y ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[8][8]; _cupy_channelizer_complex64_complex64<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[16][16]; _cupy_channelizer_complex64_complex64<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex64_complex64( const int n_chans, const int n_taps, const int n_pts, const cuFloatComplex *__restrict__ x, const cuFloatComplex *__restrict__ h, cuFloatComplex *__restrict__ y ) { __shared__ cuFloatComplex s_mem[32][32]; _cupy_channelizer_complex64_complex64<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER D/CD // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y, double s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block 
); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = h[ty * n_chans + btx]; } else { s_mem[tx][ty] = 0.0; } block.sync( ); double local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )]; } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = x[ty * n_chans + ( n_chans - 1 - btx )]; } else { s_mem[tx][ty] = 0.0; } } block.sync( ); double local_reg { s_mem[ty][tx] }; double temp {}; cuDoubleComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = local_h * local_reg; vv.x = reduce_sum_tile_shfl<double, M>( tile, temp ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ double s_mem[8][8]; _cupy_channelizer_float64_complex128<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ double s_mem[16][16]; _cupy_channelizer_float64_complex128<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_float64_complex128( const int n_chans, const int n_taps, const int n_pts, const double *__restrict__ x, const double *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ double s_mem[32][32]; _cupy_channelizer_float64_complex128<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } /////////////////////////////////////////////////////////////////////////////// // CHANNELIZER CD/CD // /////////////////////////////////////////////////////////////////////////////// template<int M> __device__ void _cupy_channelizer_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y, cuDoubleComplex s_mem[M][M] ) { const auto block = cg::this_thread_block( ); const auto tile = cg::tiled_partition<M>( block ); const unsigned int btx { blockIdx.x * blockDim.x + threadIdx.x }; const unsigned int tx { threadIdx.x }; const unsigned int ty { threadIdx.y }; // Initialize shared memory // Evaluate type at compile-time if ( btx < n_chans && ty < n_taps ) { s_mem[tx][ty] = cuConj( h[ty * n_chans + btx] ); } else { s_mem[tx][ty] = make_cuDoubleComplex( 0.0, 0.0 ); } block.sync( ); cuDoubleComplex local_h { s_mem[ty][tx] }; for ( auto bid = blockIdx.y; bid < n_pts; bid += gridDim.y ) { block.sync( ); // Load data if ( bid >= n_taps ) { if ( btx < n_chans && ty < n_taps ) { s_mem[tx][( n_taps - 1 ) - ty] = cuConj( x[( ( bid - n_taps + 1 ) + ty ) * n_chans + ( n_chans - 1 - btx )] ); } } else { if ( btx < n_chans && ty <= bid ) { s_mem[tx][bid - ty] = cuConj( x[ty * n_chans + ( n_chans - 1 - btx )] 
); } else { s_mem[tx][ty] = make_cuDoubleComplex( 0.0, 0.0 ); } } block.sync( ); cuDoubleComplex local_reg { s_mem[ty][tx] }; cuDoubleComplex temp {}; cuDoubleComplex vv {}; // Perform compute if ( ( blockIdx.x * M + ty ) < n_chans ) { temp = cuCmul( local_h, local_reg ); vv.x = reduce_sum_tile_shfl<double, M>( tile, temp.x ); vv.y = reduce_sum_tile_shfl<double, M>( tile, temp.y ); } // Store output if ( tx == 0 && ( blockIdx.x * M + ty ) < n_chans ) { y[bid * n_chans + ( blockIdx.x * M + ty )] = vv; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_channelizer_8x8_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ cuDoubleComplex s_mem[8][8]; _cupy_channelizer_complex128_complex128<8>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_channelizer_16x16_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ cuDoubleComplex s_mem[16][16]; _cupy_channelizer_complex128_complex128<16>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_channelizer_32x32_complex128_complex128( const int n_chans, const int n_taps, const int n_pts, const cuDoubleComplex *__restrict__ x, const cuDoubleComplex *__restrict__ h, cuDoubleComplex *__restrict__ y ) { __shared__ cuDoubleComplex s_mem[32][32]; _cupy_channelizer_complex128_complex128<32>( n_chans, n_taps, n_pts, x, h, y, s_mem ); } #endif
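// ---------------------------------------------------------------------------
// Illustrative host-side launch sketch (not part of the original channelizer
// kernel file above). It shows one way the 16x16 complex64/complex64
// specialization defined above could be launched from C++/CUDA host code.
// The buffer names (d_x, d_h, d_y), the stream parameter, and the cap of 1024
// on grid.y are assumptions made for this example only.
// ---------------------------------------------------------------------------
static void example_launch_channelizer_16x16_cf( const cuFloatComplex* d_x,
                                                 const cuFloatComplex* d_h,
                                                 cuFloatComplex*       d_y,
                                                 int n_chans,
                                                 int n_taps,
                                                 int n_pts,
                                                 cudaStream_t stream ) {
    if ( n_chans <= 0 || n_pts <= 0 ) {
        return;
    }
    // Block shape matches the __launch_bounds__( 256 ) 16x16 specialization.
    dim3 block( 16, 16 );
    // grid.x tiles the channels in groups of 16; the kernel strides over
    // output points with "bid += gridDim.y", so any positive grid.y is valid.
    dim3 grid( ( n_chans + 15 ) / 16, n_pts < 1024 ? n_pts : 1024 );
    _cupy_channelizer_16x16_complex64_complex64<<<grid, block, 0, stream>>>(
        n_chans, n_taps, n_pts, d_x, d_h, d_y );
}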
ec0fb1a7f4dc08a0b46798cbacf0934964afdc26.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Command line options for performance test program */ #include <algorithm> #include "cutlass/cutlass.h" #include "cutlass/version.h" #include "cutlass/library/util.h" #include "options.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Newline and indent for help strings static char const *end_of_line = "\n "; ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Device::Device(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("device", device, 0); hipError_t result; result = hipGetDeviceProperties(&properties, device); if (result != hipSuccess) { throw std::runtime_error("hipGetDeviceProperties() failed for given device"); } result = hipSetDevice(device); if (result != hipSuccess) { throw std::runtime_error("hipSetDevice() failed for given device."); } // Permit overriding the compute capability if (cmdline.check_cmd_line_flag("compute-capability")) { int cc = compute_capability(); cmdline.get_cmd_line_argument("compute-capability", cc, cc); properties.major = cc / 10; properties.minor = cc % 10; } // Permit overriding the L2 cache capacity if (cmdline.check_cmd_line_flag("llc-capacity")) { int llc_capacity = 0; cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0); if (llc_capacity >= 0) { properties.l2CacheSize = (llc_capacity << 10); } } } void Options::Device::print_usage(std::ostream &out) const { out << "Device:\n" << " --device=<int> " << " CUDA Device ID\n\n"; int 
device_count = 0; hipError_t result = hipGetDeviceCount(&device_count); if (result != hipSuccess) { out << " <could not query for CUDA devices>\n"; } else { for (int idx = 0; idx < device_count; ++idx) { hipDeviceProp_t prop; result = hipGetDeviceProperties(&prop, idx); if (result != hipSuccess) { out << " <could not obtain device properties for device " << idx << ">" << std::endl; break; } else { out << " [" << idx << "] - " << prop.name << " - SM " << prop.major << "." << prop.minor << ", " << prop.multiProcessorCount << " SMs @ " << (prop.clockRate / 1000.0) << " MHz, " << "L2 cache: " << (prop.l2CacheSize >> 20) << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) << " GB" << std::endl; } } out << "\n"; } out << " --compute-capability=<int> " << " Override the compute capability.\n\n" << " --llc-capacity=<capacity in KiB> " << " Capacity of last-level cache in kilobytes. If this is non-zero," << end_of_line << " profiling phases cycle through different input tensors to induce" << end_of_line << " capacity misses in the L2.\n\n"; } void Options::Device::print_device_info(std::ostream &out) const { int num_devices; hipDeviceProp_t props; hipError_t result; result = hipGetDeviceCount(&num_devices); if (result != hipSuccess) { throw std::runtime_error("cudaGetNumDevices() failed"); } out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl; for(int device = 0; device < num_devices; device++) { result = hipSetDevice(device); if (result != hipSuccess) { throw std::runtime_error("hipSetDevice() failed for device"); } result = hipGetDeviceProperties(&props, device); if (result != hipSuccess) { throw std::runtime_error("hipGetDeviceProperties failed for device"); } out << props.name << "," << props.major << props.minor << "," << device << "," << props.multiGpuBoardGroupID << std::endl; } } void Options::Device::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "device: " << device << "\n" << indent_str(indent) << "clock: " << int(double(properties.clockRate) / 1000.0) << "\n" << indent_str(indent) << "compute-capability: " << compute_capability() << "\n"; } /// Returns the compute capability of the listed device (e.g. 
61, 60, 70, 75) int Options::Device::compute_capability() const { return properties.major * 10 + properties.minor; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Initialization::Initialization(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("initialization-enabled", enabled, true); if (cmdline.check_cmd_line_flag("initialization-provider")) { std::string str; cmdline.get_cmd_line_argument("initialization-provider", str); provider = library::from_string<library::Provider>(str); if (provider == library::Provider::kInvalid) { enabled = false; } else if (provider != library::Provider::kReferenceHost && provider != library::Provider::kReferenceDevice) { throw std::runtime_error("Unsupported initialization provider specified."); } } else { provider = library::Provider::kReferenceDevice; } cmdline.get_cmd_line_argument("seed", seed, 2019); if (cmdline.check_cmd_line_flag("dist")) { // user has set the data distribution (fix data distribution once set) fix_data_distribution = true; // set user provided data distribution get_distribution(cmdline, "dist", data_distribution); } else { // profiler chosen data distribution (allowed to change based on numeric types) fix_data_distribution = false; // set uniform data distribution with range [-4, 4] data_distribution.set_uniform(-4, 4, 0); } } /// Gets the initial distribution void Options::Initialization::get_distribution( cutlass::CommandLine const &args, std::string const &arg, cutlass::Distribution &dist) { struct { const char *label; cutlass::Distribution::Kind kind; } distribution_kinds[] = { {"uniform", cutlass::Distribution::Uniform}, {"gaussian", cutlass::Distribution::Gaussian}, {"identity", cutlass::Distribution::Identity}, {"sequential", cutlass::Distribution::Sequential}, {0, cutlass::Distribution::Invalid} }; struct { char const *label; double *member; } members[] = { {"min", &dist.uniform.min}, {"max", &dist.uniform.max}, {"mean", &dist.gaussian.mean}, {"stddev", &dist.gaussian.stddev}, {"start", &dist.sequential.start}, {"delta", &dist.sequential.delta}, {0, 0} }; using KeyValueVector = std::vector<std::pair<std::string, std::string> >; KeyValueVector values; args.get_cmd_line_argument_pairs(arg.c_str(), values); // The parser expects the first token to be a string identifying the distribution type. auto it = values.begin(); if (it != values.end()) { for (int i = 0; distribution_kinds[i].label; ++i) { if (it->first == distribution_kinds[i].label) { dist.kind = distribution_kinds[i].kind; break; } } ++it; } // Subsequent key-value pairs update the named field of the distribution struct. for (; it != values.end(); ++it) { // Integer scaling factor - if < 0, no integer rounding is performed. if ((it->first.compare("scale") == 0) && !it->second.empty()) { std::stringstream ss; ss << it->second; ss >> dist.int_scale; continue; // next token } // Casts as integer without scaling if (it->first.compare("integer") == 0) { dist.int_scale = 0; continue; // next token } // initialize other members for (int m = 0; members[m].label; ++m) { if (it->first == members[m].label && !it->second.empty()) { std::stringstream ss; ss << it->second; ss >> *(members[m].member); } } } } void Options::Initialization::print_usage(std::ostream &out) const { out << "Initialization:\n" << " --initialization=<bool> " << " Enables initialization (default: true). 
If false, device memory is" << end_of_line << " not initialized after allocation.\n\n" << " --initialization-provider=<provider> " << " Selects initialization provider {host, device*}. (default: '*')\n\n" << " --dist=<distribution> " << " Data distribution of input tensors {uniform*, gaussian, identity, sequential}" << end_of_line << " --dist=uniform,min:<double>,max:<double>,scale:<integer>" << end_of_line << " --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer>" << end_of_line << " --dist=sequential,start:<double>,delta:<double>,scale:<integer>" << end_of_line << " --dist=identity\n\n" << " --seed=<int> " << " Random number generator seed. Used to enforce deterministic" << end_of_line << " initialization.\n\n"; } void Options::Initialization::print_options(std::ostream &out, int indent) const { } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Library::Library(cutlass::CommandLine const &cmdline) { algorithm_mode = AlgorithmMode::kDefault; if (cmdline.check_cmd_line_flag("library-algo-mode")) { std::string mode = "default"; cmdline.get_cmd_line_argument("library-algo-mode", mode); algorithm_mode = from_string<AlgorithmMode>(mode); } if (cmdline.check_cmd_line_flag("library-algos")) { // If algorithms are specified, override as kBest. algorithm_mode = AlgorithmMode::kBest; std::vector<std::string> tokens; cmdline.get_cmd_line_arguments("library-algos", tokens); algorithms.reserve(tokens.size()); for (auto const & token : tokens) { if (token.find(":")) { // todo - tokenized range } else { int algo; std::stringstream ss; ss << token; ss >> algo; algorithms.push_back(algo); } } } } void Options::Library::print_usage(std::ostream &out) const { out << "Library:\n" << " --library-algo-mode=<mode> " << " Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN.\n" << " " << " mode={default*,matching,best}\n\n" << " --library-algos=<range-list> " << " If --algorithm-mode=best, permits specifying a selection of algorithms.\n\n"; } void Options::Library::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "library-algo-mode: " << to_string(algorithm_mode) << "\n" << indent_str(indent) << "library-algos: "; int j = 0; for (int x : algorithms) { out << (j++ ? 
"," : "") << x; } out << "\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Profiling::Profiling(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0); cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10); cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100); cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50); cmdline.get_cmd_line_argument("profiling-enabled", enabled, true); if (cmdline.check_cmd_line_flag("providers")) { std::vector<std::string> tokens; cmdline.get_cmd_line_arguments("providers", tokens); providers.clear(); for (auto const &token : tokens) { providers.push_back(library::from_string<library::Provider>(token)); } } else { providers.push_back(library::Provider::kCUTLASS); providers.push_back(library::Provider::kCUBLAS); providers.push_back(library::Provider::kCUDNN); } } void Options::Profiling::print_usage(std::ostream &out) const { out << "Profiling:\n" << " --workspace-count=<workspace count> " << " Number of discrete workspaces maintained to avoid cache-resident " << end_of_line << " If zero (default), the amount is chosen for each workload based on " << end_of_line << " capacity of the last-level cache.\n\n" << " --profiling-iterations=<iterations> " << " Number of iterations to profile each kernel. If zero, kernels" << end_of_line << " are launched up to the profiling duration.\n\n" << " --warmup-iterations=<iterations> " << " Number of iterations to execute each kernel prior to profiling.\n\n" << " --sleep-duration=<duration> " << " Number of ms to sleep between profiling periods (ms).\n\n" << " --profiling-enabled=<bool> " << " If true, profiling is actually conducted.\n\n" ; } void Options::Profiling::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "profiling_iterations: " << iterations << "\n" << indent_str(indent) << "sleep_duration: " << sleep_duration << "\n" << indent_str(indent) << "profiling_enabled: " << enabled << "\n" << indent_str(indent) << "providers: ["; int j = 0; for (auto const & provider : providers) { out << (j++ ? 
", " : "") << library::to_string(provider); } out << "]\n"; } /// Returns true if a provider is enabled bool Options::Profiling::provider_enabled(library::Provider provider) const { return std::find(providers.begin(), providers.end(), provider) != providers.end(); } /// Returns the index of a provider if its enabled size_t Options::Profiling::index(library::Provider provider) const { size_t idx = 0; for (auto const & x : providers) { if (x == provider) { return idx; } ++idx; } return idx; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Verification::Verification(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("verification-enabled", enabled, true); cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05); cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0); if (cmdline.check_cmd_line_flag("save-workspace")) { std::string value; cmdline.get_cmd_line_argument("save-workspace", value); save_workspace = from_string<SaveWorkspace>(value); } else { save_workspace = SaveWorkspace::kNever; } if (cmdline.check_cmd_line_flag("verification-providers")) { std::vector<std::string> tokens; cmdline.get_cmd_line_arguments("verification-providers", tokens); providers.clear(); for (auto const &token : tokens) { library::Provider provider = library::from_string<library::Provider>(token); if (provider != library::Provider::kInvalid) { providers.push_back(provider); } } } else { providers.push_back(library::Provider::kCUBLAS); providers.push_back(library::Provider::kReferenceDevice); providers.push_back(library::Provider::kCUDNN); } } void Options::Verification::print_usage(std::ostream &out) const { out << "Verification:\n" << " --verification-enabled=<bool> " << " Whether to perform verification checks.\n\n" << " --epsilon=<error> " << " Error threshold. Setting to zero (default) requires" << end_of_line << " bit-level equivalence.\n\n" << " --nonzero-floor=<floor> " << " Results whose absolute value is less than this quantity" << end_of_line << " are treated as zero for comparisons.\n\n" << " --save-workspace=<string> " << " Specifies when to save the GEMM inputs and results to the filesystem." << end_of_line << " --save-workspace=never never save workspace (default)" << end_of_line << " --save-workspace=incorrect save workspace for incorrect results" << end_of_line << " --save-workspace=always always save workspace\n\n" << " --verification-providers=<providers> " << " List of providers used to verify result. (default: '*')" << end_of_line << " Gemm verification-providers {cublas*}" << end_of_line << " Conv2d verification-providers {cudnn*, device*, host}" << "\n\n"; } void Options::Verification::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "verification_enabled: " << enabled << "\n" << indent_str(indent) << "epsilon: " << epsilon << "\n" << indent_str(indent) << "save_workspace: " << to_string(save_workspace) << "\n" << indent_str(indent) << "verification_providers: ["; int j = 0; for (auto const & provider : providers) { out << (j++ ? 
", " : "") << library::to_string(provider); } out << "]\n"; } /// Returns true if a provider is enabled bool Options::Verification::provider_enabled(library::Provider provider) const { return std::find(providers.begin(), providers.end(), provider) != providers.end(); } /// Returns the index of a provider if its enabled size_t Options::Verification::index(library::Provider provider) const { size_t idx = 0; for (auto const & x : providers) { if (x == provider) { return idx; } ++idx; } return idx; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Report::Report(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("append", append, false); cmdline.get_cmd_line_argument("output", output_path); cmdline.get_cmd_line_argument("junit-output", junit_output_path); if (cmdline.check_cmd_line_flag("tags")) { cmdline.get_cmd_line_argument_pairs("tags", pivot_tags); } cmdline.get_cmd_line_argument("report-not-run", report_not_run, false); cmdline.get_cmd_line_argument("verbose", verbose, true); cmdline.get_cmd_line_argument("sort-results", sort_results, false); } void Options::Report::print_usage(std::ostream &out) const { out << "Report:\n" << " --append=<bool> " << " If true, result is appended to possibly existing file. Otherwise, " << end_of_line << " any existing file is overwritten.\n\n" << " --output=<path> " << " Path to output file for machine readable results. Operation kind and '.csv' is appended.\n\n" << " --junit-output=<path> " << " Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended.\n\n" << " --report-not-run=<bool> " << " If true, reports the status of all kernels including those that" << end_of_line << " do not satisfy the given arguments.\n\n" << " --tags=<column:tag,...> " << " Inserts leading columns in output table and uniform values for each" << end_of_line << " column. Useful for generating pivot tables.\n\n" << " --verbose=<bool> " << " Prints human-readable text to stdout. 
If false, nothing is written to stdout.\n\n" << " --sort-results=<bool> " << " Sorts results (by flops-per-byte).\n\n"; } void Options::Report::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "append: " << append << "\n" << indent_str(indent) << "output: " << output_path << "\n" << indent_str(indent) << "junit-output: " << junit_output_path << "\n" << indent_str(indent) << "report_not_run: " << report_not_run << "\n" << indent_str(indent) << "tags:\n"; for (auto const & tag : pivot_tags) { out << indent_str(indent + 1) << tag.first << ": " << tag.second << "\n"; } out << indent_str(indent) << "verbose: " << verbose << "\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::About::About(cutlass::CommandLine const &cmdline) { help = cmdline.check_cmd_line_flag("help"); version = cmdline.check_cmd_line_flag("version"); device_info = cmdline.check_cmd_line_flag("device-info"); } void Options::About::print_usage(std::ostream &out) const { out << "About:\n" << " --version "; print_version(out); out << "\n"; } void Options::About::print_version(std::ostream &out) { out << "CUTLASS " << cutlass::getVersionString() << " built on " << __DATE__ << " at " << __TIME__; if (!cutlass::getGitRevision().empty()) out << " with commit " << cutlass::getGitRevision() << ""; } void Options::About::print_options(std::ostream &out, int indent) const { } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Options(cutlass::CommandLine const &cmdline): cmdline(cmdline), device(cmdline), initialization(cmdline), library(cmdline), profiling(cmdline), verification(cmdline), report(cmdline), about(cmdline) { if (cmdline.check_cmd_line_flag("mode")) { std::string token; cmdline.get_cmd_line_argument("mode", token); execution_mode = from_string<ExecutionMode>(token); } else { execution_mode = ExecutionMode::kProfile; } // Enumerating kernels is equivalent to a dry run. 
if (execution_mode == ExecutionMode::kEnumerate) { execution_mode = ExecutionMode::kDryRun; } if (cmdline.check_cmd_line_flag("operation")) { std::string str; cmdline.get_cmd_line_argument("operation", str); operation_kind = library::from_string<library::OperationKind>(str); } else if (cmdline.check_cmd_line_flag("function")) { std::string str; cmdline.get_cmd_line_argument("function", str); operation_kind = library::from_string<library::OperationKind>(str); } else { operation_kind = library::OperationKind::kInvalid; } if (cmdline.check_cmd_line_flag("operation_names")) { cmdline.get_cmd_line_arguments("operation_names", operation_names); } else if (cmdline.check_cmd_line_flag("kernels")) { cmdline.get_cmd_line_arguments("kernels", operation_names); profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match"); } if (cmdline.check_cmd_line_flag("ignore-kernels")) { cmdline.get_cmd_line_arguments("ignore-kernels", excluded_operation_names); profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match"); } // Prevent launches on the device for anything other than CUTLASS operation // Allow verification only on host if (execution_mode == ExecutionMode::kTrace) { initialization.provider = library::Provider::kReferenceHost; verification.providers = {library::Provider::kReferenceHost}; profiling.enabled = false; } } void Options::print_usage(std::ostream &out) const { out << "CUTLASS Profiler\n" << "usage:\n\n" << " cutlass_profiler [options]\n\n" << " --help\n\n" << " --mode=<string> " << " Cutlass profiler execution mode." << end_of_line << " --mode=profile regular verification and profiling (default)" << end_of_line << " --mode=dry_run no kernels are launched or workspaces allocated" << end_of_line << " --mode=enumerate lists all operation kind and operations" << end_of_line << " --mode=trace executes a single device-side computation with" << end_of_line << " no other kernel launches\n\n" << " --device-info " << " Prints information on all GPUs present in the system\n\n" << " --operation=<operation_kind> " << " CUTLASS operation to profile.\n\n" << " --kernels=<string_list> " << " Filter operations by kernel names. For example, call all kernels with" << end_of_line << " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and \"align8\") in their" << end_of_line << " operation name using --kernels=\"s1688*nt, s884*tn*align8\"\n\n" << " --ignore-kernels=<string_list> " << " Excludes kernels whose names match anything in this list.\n\n" ; // // Detailed options // device.print_usage(out); out << "\n"; initialization.print_usage(out); out << "\n"; library.print_usage(out); out << "\n"; profiling.print_usage(out); out << "\n"; verification.print_usage(out); out << "\n"; report.print_usage(out); out << "\n"; about.print_usage(out); out << "\n"; } void Options::print_options(std::ostream &out) const { out << "options:\n" << " help: " << about.help << "\n" << " mode: " << to_string(execution_mode) << "\n"; out << " device:\n"; device.print_options(out, 2); out << " initialization:\n"; initialization.print_options(out, 2); out << " profiling:\n"; profiling.print_options(out, 2); out << " verification:\n"; verification.print_options(out, 2); out << " report:\n"; report.print_options(out, 2); } std::string Options::indent_str(int indent) { return std::string(indent * 2, ' '); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass
ec0fb1a7f4dc08a0b46798cbacf0934964afdc26.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Command line options for performance test program */ #include <algorithm> #include "cutlass/cutlass.h" #include "cutlass/version.h" #include "cutlass/library/util.h" #include "options.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Newline and indent for help strings static char const *end_of_line = "\n "; ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Device::Device(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("device", device, 0); cudaError_t result; result = cudaGetDeviceProperties(&properties, device); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed for given device"); } result = cudaSetDevice(device); if (result != cudaSuccess) { throw std::runtime_error("cudaSetDevice() failed for given device."); } // Permit overriding the compute capability if (cmdline.check_cmd_line_flag("compute-capability")) { int cc = compute_capability(); cmdline.get_cmd_line_argument("compute-capability", cc, cc); properties.major = cc / 10; properties.minor = cc % 10; } // Permit overriding the L2 cache capacity if (cmdline.check_cmd_line_flag("llc-capacity")) { int llc_capacity = 0; cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0); if (llc_capacity >= 0) { properties.l2CacheSize = (llc_capacity << 10); } } } void Options::Device::print_usage(std::ostream &out) const { out << "Device:\n" << " --device=<int> " << " CUDA Device ID\n\n"; int device_count = 0; cudaError_t result = 
cudaGetDeviceCount(&device_count); if (result != cudaSuccess) { out << " <could not query for CUDA devices>\n"; } else { for (int idx = 0; idx < device_count; ++idx) { cudaDeviceProp prop; result = cudaGetDeviceProperties(&prop, idx); if (result != cudaSuccess) { out << " <could not obtain device properties for device " << idx << ">" << std::endl; break; } else { out << " [" << idx << "] - " << prop.name << " - SM " << prop.major << "." << prop.minor << ", " << prop.multiProcessorCount << " SMs @ " << (prop.clockRate / 1000.0) << " MHz, " << "L2 cache: " << (prop.l2CacheSize >> 20) << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) << " GB" << std::endl; } } out << "\n"; } out << " --compute-capability=<int> " << " Override the compute capability.\n\n" << " --llc-capacity=<capacity in KiB> " << " Capacity of last-level cache in kilobytes. If this is non-zero," << end_of_line << " profiling phases cycle through different input tensors to induce" << end_of_line << " capacity misses in the L2.\n\n"; } void Options::Device::print_device_info(std::ostream &out) const { int num_devices; cudaDeviceProp props; cudaError_t result; result = cudaGetDeviceCount(&num_devices); if (result != cudaSuccess) { throw std::runtime_error("cudaGetNumDevices() failed"); } out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl; for(int device = 0; device < num_devices; device++) { result = cudaSetDevice(device); if (result != cudaSuccess) { throw std::runtime_error("cudaSetDevice() failed for device"); } result = cudaGetDeviceProperties(&props, device); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties failed for device"); } out << props.name << "," << props.major << props.minor << "," << device << "," << props.multiGpuBoardGroupID << std::endl; } } void Options::Device::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "device: " << device << "\n" << indent_str(indent) << "clock: " << int(double(properties.clockRate) / 1000.0) << "\n" << indent_str(indent) << "compute-capability: " << compute_capability() << "\n"; } /// Returns the compute capability of the listed device (e.g. 
61, 60, 70, 75) int Options::Device::compute_capability() const { return properties.major * 10 + properties.minor; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Initialization::Initialization(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("initialization-enabled", enabled, true); if (cmdline.check_cmd_line_flag("initialization-provider")) { std::string str; cmdline.get_cmd_line_argument("initialization-provider", str); provider = library::from_string<library::Provider>(str); if (provider == library::Provider::kInvalid) { enabled = false; } else if (provider != library::Provider::kReferenceHost && provider != library::Provider::kReferenceDevice) { throw std::runtime_error("Unsupported initialization provider specified."); } } else { provider = library::Provider::kReferenceDevice; } cmdline.get_cmd_line_argument("seed", seed, 2019); if (cmdline.check_cmd_line_flag("dist")) { // user has set the data distribution (fix data distribution once set) fix_data_distribution = true; // set user provided data distribution get_distribution(cmdline, "dist", data_distribution); } else { // profiler chosen data distribution (allowed to change based on numeric types) fix_data_distribution = false; // set uniform data distribution with range [-4, 4] data_distribution.set_uniform(-4, 4, 0); } } /// Gets the initial distribution void Options::Initialization::get_distribution( cutlass::CommandLine const &args, std::string const &arg, cutlass::Distribution &dist) { struct { const char *label; cutlass::Distribution::Kind kind; } distribution_kinds[] = { {"uniform", cutlass::Distribution::Uniform}, {"gaussian", cutlass::Distribution::Gaussian}, {"identity", cutlass::Distribution::Identity}, {"sequential", cutlass::Distribution::Sequential}, {0, cutlass::Distribution::Invalid} }; struct { char const *label; double *member; } members[] = { {"min", &dist.uniform.min}, {"max", &dist.uniform.max}, {"mean", &dist.gaussian.mean}, {"stddev", &dist.gaussian.stddev}, {"start", &dist.sequential.start}, {"delta", &dist.sequential.delta}, {0, 0} }; using KeyValueVector = std::vector<std::pair<std::string, std::string> >; KeyValueVector values; args.get_cmd_line_argument_pairs(arg.c_str(), values); // The parser expects the first token to be a string identifying the distribution type. auto it = values.begin(); if (it != values.end()) { for (int i = 0; distribution_kinds[i].label; ++i) { if (it->first == distribution_kinds[i].label) { dist.kind = distribution_kinds[i].kind; break; } } ++it; } // Subsequent key-value pairs update the named field of the distribution struct. for (; it != values.end(); ++it) { // Integer scaling factor - if < 0, no integer rounding is performed. if ((it->first.compare("scale") == 0) && !it->second.empty()) { std::stringstream ss; ss << it->second; ss >> dist.int_scale; continue; // next token } // Casts as integer without scaling if (it->first.compare("integer") == 0) { dist.int_scale = 0; continue; // next token } // initialize other members for (int m = 0; members[m].label; ++m) { if (it->first == members[m].label && !it->second.empty()) { std::stringstream ss; ss << it->second; ss >> *(members[m].member); } } } } void Options::Initialization::print_usage(std::ostream &out) const { out << "Initialization:\n" << " --initialization=<bool> " << " Enables initialization (default: true). 
If false, device memory is" << end_of_line << " not initialized after allocation.\n\n" << " --initialization-provider=<provider> " << " Selects initialization provider {host, device*}. (default: '*')\n\n" << " --dist=<distribution> " << " Data distribution of input tensors {uniform*, gaussian, identity, sequential}" << end_of_line << " --dist=uniform,min:<double>,max:<double>,scale:<integer>" << end_of_line << " --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer>" << end_of_line << " --dist=sequential,start:<double>,delta:<double>,scale:<integer>" << end_of_line << " --dist=identity\n\n" << " --seed=<int> " << " Random number generator seed. Used to enforce deterministic" << end_of_line << " initialization.\n\n"; } void Options::Initialization::print_options(std::ostream &out, int indent) const { } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Library::Library(cutlass::CommandLine const &cmdline) { algorithm_mode = AlgorithmMode::kDefault; if (cmdline.check_cmd_line_flag("library-algo-mode")) { std::string mode = "default"; cmdline.get_cmd_line_argument("library-algo-mode", mode); algorithm_mode = from_string<AlgorithmMode>(mode); } if (cmdline.check_cmd_line_flag("library-algos")) { // If algorithms are specified, override as kBest. algorithm_mode = AlgorithmMode::kBest; std::vector<std::string> tokens; cmdline.get_cmd_line_arguments("library-algos", tokens); algorithms.reserve(tokens.size()); for (auto const & token : tokens) { if (token.find(":")) { // todo - tokenized range } else { int algo; std::stringstream ss; ss << token; ss >> algo; algorithms.push_back(algo); } } } } void Options::Library::print_usage(std::ostream &out) const { out << "Library:\n" << " --library-algo-mode=<mode> " << " Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN.\n" << " " << " mode={default*,matching,best}\n\n" << " --library-algos=<range-list> " << " If --algorithm-mode=best, permits specifying a selection of algorithms.\n\n"; } void Options::Library::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "library-algo-mode: " << to_string(algorithm_mode) << "\n" << indent_str(indent) << "library-algos: "; int j = 0; for (int x : algorithms) { out << (j++ ? 
"," : "") << x; } out << "\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Profiling::Profiling(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0); cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10); cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100); cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50); cmdline.get_cmd_line_argument("profiling-enabled", enabled, true); if (cmdline.check_cmd_line_flag("providers")) { std::vector<std::string> tokens; cmdline.get_cmd_line_arguments("providers", tokens); providers.clear(); for (auto const &token : tokens) { providers.push_back(library::from_string<library::Provider>(token)); } } else { providers.push_back(library::Provider::kCUTLASS); providers.push_back(library::Provider::kCUBLAS); providers.push_back(library::Provider::kCUDNN); } } void Options::Profiling::print_usage(std::ostream &out) const { out << "Profiling:\n" << " --workspace-count=<workspace count> " << " Number of discrete workspaces maintained to avoid cache-resident " << end_of_line << " If zero (default), the amount is chosen for each workload based on " << end_of_line << " capacity of the last-level cache.\n\n" << " --profiling-iterations=<iterations> " << " Number of iterations to profile each kernel. If zero, kernels" << end_of_line << " are launched up to the profiling duration.\n\n" << " --warmup-iterations=<iterations> " << " Number of iterations to execute each kernel prior to profiling.\n\n" << " --sleep-duration=<duration> " << " Number of ms to sleep between profiling periods (ms).\n\n" << " --profiling-enabled=<bool> " << " If true, profiling is actually conducted.\n\n" ; } void Options::Profiling::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "profiling_iterations: " << iterations << "\n" << indent_str(indent) << "sleep_duration: " << sleep_duration << "\n" << indent_str(indent) << "profiling_enabled: " << enabled << "\n" << indent_str(indent) << "providers: ["; int j = 0; for (auto const & provider : providers) { out << (j++ ? 
", " : "") << library::to_string(provider); } out << "]\n"; } /// Returns true if a provider is enabled bool Options::Profiling::provider_enabled(library::Provider provider) const { return std::find(providers.begin(), providers.end(), provider) != providers.end(); } /// Returns the index of a provider if its enabled size_t Options::Profiling::index(library::Provider provider) const { size_t idx = 0; for (auto const & x : providers) { if (x == provider) { return idx; } ++idx; } return idx; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Verification::Verification(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("verification-enabled", enabled, true); cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05); cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0); if (cmdline.check_cmd_line_flag("save-workspace")) { std::string value; cmdline.get_cmd_line_argument("save-workspace", value); save_workspace = from_string<SaveWorkspace>(value); } else { save_workspace = SaveWorkspace::kNever; } if (cmdline.check_cmd_line_flag("verification-providers")) { std::vector<std::string> tokens; cmdline.get_cmd_line_arguments("verification-providers", tokens); providers.clear(); for (auto const &token : tokens) { library::Provider provider = library::from_string<library::Provider>(token); if (provider != library::Provider::kInvalid) { providers.push_back(provider); } } } else { providers.push_back(library::Provider::kCUBLAS); providers.push_back(library::Provider::kReferenceDevice); providers.push_back(library::Provider::kCUDNN); } } void Options::Verification::print_usage(std::ostream &out) const { out << "Verification:\n" << " --verification-enabled=<bool> " << " Whether to perform verification checks.\n\n" << " --epsilon=<error> " << " Error threshold. Setting to zero (default) requires" << end_of_line << " bit-level equivalence.\n\n" << " --nonzero-floor=<floor> " << " Results whose absolute value is less than this quantity" << end_of_line << " are treated as zero for comparisons.\n\n" << " --save-workspace=<string> " << " Specifies when to save the GEMM inputs and results to the filesystem." << end_of_line << " --save-workspace=never never save workspace (default)" << end_of_line << " --save-workspace=incorrect save workspace for incorrect results" << end_of_line << " --save-workspace=always always save workspace\n\n" << " --verification-providers=<providers> " << " List of providers used to verify result. (default: '*')" << end_of_line << " Gemm verification-providers {cublas*}" << end_of_line << " Conv2d verification-providers {cudnn*, device*, host}" << "\n\n"; } void Options::Verification::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "verification_enabled: " << enabled << "\n" << indent_str(indent) << "epsilon: " << epsilon << "\n" << indent_str(indent) << "save_workspace: " << to_string(save_workspace) << "\n" << indent_str(indent) << "verification_providers: ["; int j = 0; for (auto const & provider : providers) { out << (j++ ? 
", " : "") << library::to_string(provider); } out << "]\n"; } /// Returns true if a provider is enabled bool Options::Verification::provider_enabled(library::Provider provider) const { return std::find(providers.begin(), providers.end(), provider) != providers.end(); } /// Returns the index of a provider if its enabled size_t Options::Verification::index(library::Provider provider) const { size_t idx = 0; for (auto const & x : providers) { if (x == provider) { return idx; } ++idx; } return idx; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Report::Report(cutlass::CommandLine const &cmdline) { cmdline.get_cmd_line_argument("append", append, false); cmdline.get_cmd_line_argument("output", output_path); cmdline.get_cmd_line_argument("junit-output", junit_output_path); if (cmdline.check_cmd_line_flag("tags")) { cmdline.get_cmd_line_argument_pairs("tags", pivot_tags); } cmdline.get_cmd_line_argument("report-not-run", report_not_run, false); cmdline.get_cmd_line_argument("verbose", verbose, true); cmdline.get_cmd_line_argument("sort-results", sort_results, false); } void Options::Report::print_usage(std::ostream &out) const { out << "Report:\n" << " --append=<bool> " << " If true, result is appended to possibly existing file. Otherwise, " << end_of_line << " any existing file is overwritten.\n\n" << " --output=<path> " << " Path to output file for machine readable results. Operation kind and '.csv' is appended.\n\n" << " --junit-output=<path> " << " Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended.\n\n" << " --report-not-run=<bool> " << " If true, reports the status of all kernels including those that" << end_of_line << " do not satisfy the given arguments.\n\n" << " --tags=<column:tag,...> " << " Inserts leading columns in output table and uniform values for each" << end_of_line << " column. Useful for generating pivot tables.\n\n" << " --verbose=<bool> " << " Prints human-readable text to stdout. 
If false, nothing is written to stdout.\n\n" << " --sort-results=<bool> " << " Sorts results (by flops-per-byte).\n\n"; } void Options::Report::print_options(std::ostream &out, int indent) const { out << indent_str(indent) << "append: " << append << "\n" << indent_str(indent) << "output: " << output_path << "\n" << indent_str(indent) << "junit-output: " << junit_output_path << "\n" << indent_str(indent) << "report_not_run: " << report_not_run << "\n" << indent_str(indent) << "tags:\n"; for (auto const & tag : pivot_tags) { out << indent_str(indent + 1) << tag.first << ": " << tag.second << "\n"; } out << indent_str(indent) << "verbose: " << verbose << "\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::About::About(cutlass::CommandLine const &cmdline) { help = cmdline.check_cmd_line_flag("help"); version = cmdline.check_cmd_line_flag("version"); device_info = cmdline.check_cmd_line_flag("device-info"); } void Options::About::print_usage(std::ostream &out) const { out << "About:\n" << " --version "; print_version(out); out << "\n"; } void Options::About::print_version(std::ostream &out) { out << "CUTLASS " << cutlass::getVersionString() << " built on " << __DATE__ << " at " << __TIME__; if (!cutlass::getGitRevision().empty()) out << " with commit " << cutlass::getGitRevision() << ""; } void Options::About::print_options(std::ostream &out, int indent) const { } ///////////////////////////////////////////////////////////////////////////////////////////////// Options::Options(cutlass::CommandLine const &cmdline): cmdline(cmdline), device(cmdline), initialization(cmdline), library(cmdline), profiling(cmdline), verification(cmdline), report(cmdline), about(cmdline) { if (cmdline.check_cmd_line_flag("mode")) { std::string token; cmdline.get_cmd_line_argument("mode", token); execution_mode = from_string<ExecutionMode>(token); } else { execution_mode = ExecutionMode::kProfile; } // Enumerating kernels is equivalent to a dry run. 
if (execution_mode == ExecutionMode::kEnumerate) { execution_mode = ExecutionMode::kDryRun; } if (cmdline.check_cmd_line_flag("operation")) { std::string str; cmdline.get_cmd_line_argument("operation", str); operation_kind = library::from_string<library::OperationKind>(str); } else if (cmdline.check_cmd_line_flag("function")) { std::string str; cmdline.get_cmd_line_argument("function", str); operation_kind = library::from_string<library::OperationKind>(str); } else { operation_kind = library::OperationKind::kInvalid; } if (cmdline.check_cmd_line_flag("operation_names")) { cmdline.get_cmd_line_arguments("operation_names", operation_names); } else if (cmdline.check_cmd_line_flag("kernels")) { cmdline.get_cmd_line_arguments("kernels", operation_names); profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match"); } if (cmdline.check_cmd_line_flag("ignore-kernels")) { cmdline.get_cmd_line_arguments("ignore-kernels", excluded_operation_names); profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match"); } // Prevent launches on the device for anything other than CUTLASS operation // Allow verification only on host if (execution_mode == ExecutionMode::kTrace) { initialization.provider = library::Provider::kReferenceHost; verification.providers = {library::Provider::kReferenceHost}; profiling.enabled = false; } } void Options::print_usage(std::ostream &out) const { out << "CUTLASS Profiler\n" << "usage:\n\n" << " cutlass_profiler [options]\n\n" << " --help\n\n" << " --mode=<string> " << " Cutlass profiler execution mode." << end_of_line << " --mode=profile regular verification and profiling (default)" << end_of_line << " --mode=dry_run no kernels are launched or workspaces allocated" << end_of_line << " --mode=enumerate lists all operation kind and operations" << end_of_line << " --mode=trace executes a single device-side computation with" << end_of_line << " no other kernel launches\n\n" << " --device-info " << " Prints information on all GPUs present in the system\n\n" << " --operation=<operation_kind> " << " CUTLASS operation to profile.\n\n" << " --kernels=<string_list> " << " Filter operations by kernel names. For example, call all kernels with" << end_of_line << " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and \"align8\") in their" << end_of_line << " operation name using --kernels=\"s1688*nt, s884*tn*align8\"\n\n" << " --ignore-kernels=<string_list> " << " Excludes kernels whose names match anything in this list.\n\n" ; // // Detailed options // device.print_usage(out); out << "\n"; initialization.print_usage(out); out << "\n"; library.print_usage(out); out << "\n"; profiling.print_usage(out); out << "\n"; verification.print_usage(out); out << "\n"; report.print_usage(out); out << "\n"; about.print_usage(out); out << "\n"; } void Options::print_options(std::ostream &out) const { out << "options:\n" << " help: " << about.help << "\n" << " mode: " << to_string(execution_mode) << "\n"; out << " device:\n"; device.print_options(out, 2); out << " initialization:\n"; initialization.print_options(out, 2); out << " profiling:\n"; profiling.print_options(out, 2); out << " verification:\n"; verification.print_options(out, 2); out << " report:\n"; report.print_options(out, 2); } std::string Options::indent_str(int indent) { return std::string(indent * 2, ' '); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass
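// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original CUTLASS source above): how the
// option groups implemented in this file might be driven from a small host
// program. The cutlass::CommandLine constructor signature is assumed from
// cutlass/util/command_line.h; print_usage(), print_options(), and about.help
// appear in the implementation above. The function name example_profiler_entry
// is ours, chosen to avoid clashing with the real profiler's main().
// ---------------------------------------------------------------------------
#include <iostream>

int example_profiler_entry(int argc, char const* argv[]) {
  cutlass::CommandLine cmdline(argc, argv);      // tokenizes --flag=value pairs (ctor signature assumed)
  cutlass::profiler::Options options(cmdline);   // builds every option group defined above

  if (options.about.help) {
    options.print_usage(std::cout);              // prints the same help text as the blocks above
    return 0;
  }

  options.print_options(std::cout);              // echoes the parsed settings
  return 0;
}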
d71d181bf27496110d54297c53de9b7dfddb3bb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ilut_kernels.hpp" #include <algorithm> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/components/prefix_sum_kernels.hpp" #include "core/matrix/coo_builder.hpp" #include "core/matrix/csr_builder.hpp" #include "core/matrix/csr_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/config.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/intrinsics.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/sorting.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/factorization/par_ilut_select_common.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ILUT factorization namespace. 
* * @ingroup factor */ namespace par_ilut_factorization { // subwarp sizes for filter kernels using compiled_kernels = syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>; #include "common/cuda_hip/factorization/par_ilut_filter_kernels.hpp.inc" #include "common/cuda_hip/factorization/par_ilut_select_kernels.hpp.inc" template <int subwarp_size, typename ValueType, typename IndexType> void threshold_filter_approx(syn::value_list<int, subwarp_size>, std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, array<ValueType>* tmp, remove_complex<ValueType>* threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto values = m->get_const_values(); IndexType size = m->get_num_stored_elements(); using AbsType = remove_complex<ValueType>; constexpr auto bucket_count = kernel::searchtree_width; auto max_num_threads = ceildiv(size, items_per_thread); auto max_num_blocks = ceildiv(max_num_threads, default_block_size); size_type tmp_size_totals = ceildiv((bucket_count + 1) * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_partials = ceildiv( bucket_count * max_num_blocks * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_oracles = ceildiv(size * sizeof(unsigned char), sizeof(ValueType)); size_type tmp_size_tree = ceildiv(kernel::searchtree_size * sizeof(AbsType), sizeof(ValueType)); size_type tmp_size = tmp_size_totals + tmp_size_partials + tmp_size_oracles + tmp_size_tree; tmp->resize_and_reset(tmp_size); auto total_counts = reinterpret_cast<IndexType*>(tmp->get_data()); auto partial_counts = reinterpret_cast<IndexType*>(tmp->get_data() + tmp_size_totals); auto oracles = reinterpret_cast<unsigned char*>( tmp->get_data() + tmp_size_totals + tmp_size_partials); auto tree = reinterpret_cast<AbsType*>(tmp->get_data() + tmp_size_totals + tmp_size_partials + tmp_size_oracles); sampleselect_count(exec, values, size, tree, oracles, partial_counts, total_counts); // determine bucket with correct rank auto bucket = static_cast<unsigned char>( sampleselect_find_bucket(exec, total_counts, rank).idx); *threshold = exec->copy_val_to_host(tree + kernel::searchtree_inner_size + bucket); // we implicitly set the first splitter to -inf, but 0 works as well if (bucket == 0) { *threshold = zero<AbsType>(); } // filter the elements auto old_row_ptrs = m->get_const_row_ptrs(); auto old_col_idxs = m->get_const_col_idxs(); auto old_vals = m->get_const_values(); // compute nnz for each row auto num_rows = static_cast<IndexType>(m->get_size()[0]); auto block_size = default_block_size / subwarp_size; auto num_blocks = ceildiv(num_rows, block_size); auto new_row_ptrs = m_out->get_row_ptrs(); if (num_blocks > 0) { hipLaunchKernelGGL(( kernel::bucket_filter_nnz<subwarp_size>) , dim3(num_blocks), dim3(default_block_size), 0, 0, old_row_ptrs, oracles, num_rows, bucket, new_row_ptrs); } // build row pointers components::prefix_sum(exec, new_row_ptrs, num_rows + 1); // build matrix auto new_nnz = exec->copy_val_to_host(new_row_ptrs + num_rows); // resize arrays and update aliases matrix::CsrBuilder<ValueType, IndexType> builder{m_out}; builder.get_col_idx_array().resize_and_reset(new_nnz); builder.get_value_array().resize_and_reset(new_nnz); auto new_col_idxs = m_out->get_col_idxs(); auto new_vals = m_out->get_values(); IndexType* new_row_idxs{}; if (m_out_coo) { matrix::CooBuilder<ValueType, IndexType> coo_builder{m_out_coo}; coo_builder.get_row_idx_array().resize_and_reset(new_nnz); coo_builder.get_col_idx_array() 
= make_array_view(exec, new_nnz, new_col_idxs); coo_builder.get_value_array() = make_array_view(exec, new_nnz, new_vals); new_row_idxs = m_out_coo->get_row_idxs(); } if (num_blocks > 0) { hipLaunchKernelGGL(( kernel::bucket_filter<subwarp_size>), dim3(num_blocks), dim3(default_block_size), 0, 0, old_row_ptrs, old_col_idxs, as_cuda_type(old_vals), oracles, num_rows, bucket, new_row_ptrs, new_row_idxs, new_col_idxs, as_cuda_type(new_vals)); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_threshold_filter_approx, threshold_filter_approx); template <typename ValueType, typename IndexType> void threshold_filter_approx(std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, array<ValueType>& tmp, remove_complex<ValueType>& threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto num_rows = m->get_size()[0]; auto total_nnz = m->get_num_stored_elements(); auto total_nnz_per_row = total_nnz / num_rows; select_threshold_filter_approx( compiled_kernels(), [&](int compiled_subwarp_size) { return total_nnz_per_row <= compiled_subwarp_size || compiled_subwarp_size == config::warp_size; }, syn::value_list<int>(), syn::type_list<>(), exec, m, rank, &tmp, &threshold, m_out, m_out_coo); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_ILUT_THRESHOLD_FILTER_APPROX_KERNEL); } // namespace par_ilut_factorization } // namespace cuda } // namespace kernels } // namespace gko
d71d181bf27496110d54297c53de9b7dfddb3bb5.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/factorization/par_ilut_kernels.hpp" #include <algorithm> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/matrix/coo.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/components/prefix_sum_kernels.hpp" #include "core/matrix/coo_builder.hpp" #include "core/matrix/csr_builder.hpp" #include "core/matrix/csr_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/config.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/intrinsics.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/sorting.cuh" #include "cuda/components/thread_ids.cuh" #include "cuda/factorization/par_ilut_select_common.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The parallel ILUT factorization namespace. 
* * @ingroup factor */ namespace par_ilut_factorization { // subwarp sizes for filter kernels using compiled_kernels = syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>; #include "common/cuda_hip/factorization/par_ilut_filter_kernels.hpp.inc" #include "common/cuda_hip/factorization/par_ilut_select_kernels.hpp.inc" template <int subwarp_size, typename ValueType, typename IndexType> void threshold_filter_approx(syn::value_list<int, subwarp_size>, std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, array<ValueType>* tmp, remove_complex<ValueType>* threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto values = m->get_const_values(); IndexType size = m->get_num_stored_elements(); using AbsType = remove_complex<ValueType>; constexpr auto bucket_count = kernel::searchtree_width; auto max_num_threads = ceildiv(size, items_per_thread); auto max_num_blocks = ceildiv(max_num_threads, default_block_size); size_type tmp_size_totals = ceildiv((bucket_count + 1) * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_partials = ceildiv( bucket_count * max_num_blocks * sizeof(IndexType), sizeof(ValueType)); size_type tmp_size_oracles = ceildiv(size * sizeof(unsigned char), sizeof(ValueType)); size_type tmp_size_tree = ceildiv(kernel::searchtree_size * sizeof(AbsType), sizeof(ValueType)); size_type tmp_size = tmp_size_totals + tmp_size_partials + tmp_size_oracles + tmp_size_tree; tmp->resize_and_reset(tmp_size); auto total_counts = reinterpret_cast<IndexType*>(tmp->get_data()); auto partial_counts = reinterpret_cast<IndexType*>(tmp->get_data() + tmp_size_totals); auto oracles = reinterpret_cast<unsigned char*>( tmp->get_data() + tmp_size_totals + tmp_size_partials); auto tree = reinterpret_cast<AbsType*>(tmp->get_data() + tmp_size_totals + tmp_size_partials + tmp_size_oracles); sampleselect_count(exec, values, size, tree, oracles, partial_counts, total_counts); // determine bucket with correct rank auto bucket = static_cast<unsigned char>( sampleselect_find_bucket(exec, total_counts, rank).idx); *threshold = exec->copy_val_to_host(tree + kernel::searchtree_inner_size + bucket); // we implicitly set the first splitter to -inf, but 0 works as well if (bucket == 0) { *threshold = zero<AbsType>(); } // filter the elements auto old_row_ptrs = m->get_const_row_ptrs(); auto old_col_idxs = m->get_const_col_idxs(); auto old_vals = m->get_const_values(); // compute nnz for each row auto num_rows = static_cast<IndexType>(m->get_size()[0]); auto block_size = default_block_size / subwarp_size; auto num_blocks = ceildiv(num_rows, block_size); auto new_row_ptrs = m_out->get_row_ptrs(); if (num_blocks > 0) { kernel::bucket_filter_nnz<subwarp_size> <<<num_blocks, default_block_size>>>( old_row_ptrs, oracles, num_rows, bucket, new_row_ptrs); } // build row pointers components::prefix_sum(exec, new_row_ptrs, num_rows + 1); // build matrix auto new_nnz = exec->copy_val_to_host(new_row_ptrs + num_rows); // resize arrays and update aliases matrix::CsrBuilder<ValueType, IndexType> builder{m_out}; builder.get_col_idx_array().resize_and_reset(new_nnz); builder.get_value_array().resize_and_reset(new_nnz); auto new_col_idxs = m_out->get_col_idxs(); auto new_vals = m_out->get_values(); IndexType* new_row_idxs{}; if (m_out_coo) { matrix::CooBuilder<ValueType, IndexType> coo_builder{m_out_coo}; coo_builder.get_row_idx_array().resize_and_reset(new_nnz); coo_builder.get_col_idx_array() = make_array_view(exec, new_nnz, 
new_col_idxs); coo_builder.get_value_array() = make_array_view(exec, new_nnz, new_vals); new_row_idxs = m_out_coo->get_row_idxs(); } if (num_blocks > 0) { kernel::bucket_filter<subwarp_size><<<num_blocks, default_block_size>>>( old_row_ptrs, old_col_idxs, as_cuda_type(old_vals), oracles, num_rows, bucket, new_row_ptrs, new_row_idxs, new_col_idxs, as_cuda_type(new_vals)); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_threshold_filter_approx, threshold_filter_approx); template <typename ValueType, typename IndexType> void threshold_filter_approx(std::shared_ptr<const DefaultExecutor> exec, const matrix::Csr<ValueType, IndexType>* m, IndexType rank, array<ValueType>& tmp, remove_complex<ValueType>& threshold, matrix::Csr<ValueType, IndexType>* m_out, matrix::Coo<ValueType, IndexType>* m_out_coo) { auto num_rows = m->get_size()[0]; auto total_nnz = m->get_num_stored_elements(); auto total_nnz_per_row = total_nnz / num_rows; select_threshold_filter_approx( compiled_kernels(), [&](int compiled_subwarp_size) { return total_nnz_per_row <= compiled_subwarp_size || compiled_subwarp_size == config::warp_size; }, syn::value_list<int>(), syn::type_list<>(), exec, m, rank, &tmp, &threshold, m_out, m_out_coo); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_PAR_ILUT_THRESHOLD_FILTER_APPROX_KERNEL); } // namespace par_ilut_factorization } // namespace cuda } // namespace kernels } // namespace gko
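The Ginkgo pair above is dominated by one mechanical change: every CUDA triple-chevron launch becomes a hipLaunchKernelGGL call with the grid, block, shared-memory and stream arguments made explicit. A minimal, self-contained sketch of that same translation on a toy scale_kernel (hypothetical, not part of the Ginkgo sources) is:

// Minimal sketch of the launch-syntax rewrite shown in the pair above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(const float* in, float* out, float alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = alpha * in[i];
    }
}

int main()
{
    const int n = 1 << 10;
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_in, 0, n * sizeof(float));

    const int block_size = 256;
    const int num_blocks = (n + block_size - 1) / block_size;
    // CUDA form below; hipify emits the equivalent
    // hipLaunchKernelGGL((scale_kernel), dim3(num_blocks), dim3(block_size), 0, 0,
    //                    d_in, d_out, 2.0f, n);
    scale_kernel<<<num_blocks, block_size>>>(d_in, d_out, 2.0f, n);
    cudaDeviceSynchronize();

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}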
085fd09e13aa1b6f5ff294c544891f52438dbe8e.hip
// !!! This is a file automatically generated by hipify!!! /* * This program uses the device CURAND API to calculate what * proportion of pseudo-random ints have low bit set. * It then generates uniform results to calculate how many * are greater than .5. * It then generates normal results to calculate how many * are within one standard deviation of the mean. */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #define THREADS_PER_BLOCK 64 #define BLOCKS 64 #define SIMULTANEOUS_THREADS (THREADS_PER_BLOCK * BLOCKS) #define RANDOMS_PER_ITERATION 10000 #define KERNEL_ITERATIONS 50 #define CUDA_CALL(x) do { if((x) != hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ return EXIT_FAILURE;}} while(0) __global__ void setup_kernel(hiprandState_t *state) { int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init(1234, id, 0, &state[id]); } __global__ void generate_kernel(hiprandState_t *state, unsigned int *result) { int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; int count = 0; unsigned int x; /* Copy state to local memory for efficiency */ hiprandState_t localState = state[id]; /* Generate pseudo-random unsigned ints */ for(int n = 0; n < RANDOMS_PER_ITERATION; n++) { x = hiprand(&localState); /* Check if low bit set */ if(x & 1) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } int main(int argc, char *argv[]) { int i; unsigned int total; hiprandState_t *devStates; unsigned int *devResults, *hostResults; /* Allocate space for results on host */ hostResults = (unsigned int *)calloc(SIMULTANEOUS_THREADS, sizeof(unsigned int)); /* Allocate space for results on device */ CUDA_CALL(hipMalloc((void **)&devResults, SIMULTANEOUS_THREADS * sizeof(unsigned int))); /* Set results to 0 */ CUDA_CALL(hipMemset(devResults, 0, SIMULTANEOUS_THREADS * sizeof(unsigned int))); /* Allocate space for prng states on device */ CUDA_CALL(hipMalloc((void **)&devStates, SIMULTANEOUS_THREADS * sizeof(hiprandState_t))); // Set up RNG state objects. hipLaunchKernelGGL(( setup_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, devStates); // Generate a ton of random numbers across 50 passes. for(i = 0; i < KERNEL_ITERATIONS; i++) { hipLaunchKernelGGL(( generate_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, devStates, devResults); } // Copy device memory to host. CUDA_CALL(hipMemcpy(hostResults, devResults, SIMULTANEOUS_THREADS * sizeof(unsigned int), hipMemcpyDeviceToHost)); // Show result. total = 0; for(i = 0; i < SIMULTANEOUS_THREADS; i++) { total += hostResults[i]; } printf("Fraction with low bit set was %10.13f\n", (float)total / (1.0f * SIMULTANEOUS_THREADS * RANDOMS_PER_ITERATION * KERNEL_ITERATIONS)); /* Cleanup */ CUDA_CALL(hipFree(devStates)); CUDA_CALL(hipFree(devResults)); free(hostResults); printf("^^^^ kernel_example PASSED\n"); return EXIT_SUCCESS; }
085fd09e13aa1b6f5ff294c544891f52438dbe8e.cu
/* * This program uses the device CURAND API to calculate what * proportion of pseudo-random ints have low bit set. * It then generates uniform results to calculate how many * are greater than .5. * It then generates normal results to calculate how many * are within one standard deviation of the mean. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #define THREADS_PER_BLOCK 64 #define BLOCKS 64 #define SIMULTANEOUS_THREADS (THREADS_PER_BLOCK * BLOCKS) #define RANDOMS_PER_ITERATION 10000 #define KERNEL_ITERATIONS 50 #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ return EXIT_FAILURE;}} while(0) __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(1234, id, 0, &state[id]); } __global__ void generate_kernel(curandState *state, unsigned int *result) { int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; int count = 0; unsigned int x; /* Copy state to local memory for efficiency */ curandState localState = state[id]; /* Generate pseudo-random unsigned ints */ for(int n = 0; n < RANDOMS_PER_ITERATION; n++) { x = curand(&localState); /* Check if low bit set */ if(x & 1) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } int main(int argc, char *argv[]) { int i; unsigned int total; curandState *devStates; unsigned int *devResults, *hostResults; /* Allocate space for results on host */ hostResults = (unsigned int *)calloc(SIMULTANEOUS_THREADS, sizeof(unsigned int)); /* Allocate space for results on device */ CUDA_CALL(cudaMalloc((void **)&devResults, SIMULTANEOUS_THREADS * sizeof(unsigned int))); /* Set results to 0 */ CUDA_CALL(cudaMemset(devResults, 0, SIMULTANEOUS_THREADS * sizeof(unsigned int))); /* Allocate space for prng states on device */ CUDA_CALL(cudaMalloc((void **)&devStates, SIMULTANEOUS_THREADS * sizeof(curandState))); // Set up RNG state objects. setup_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(devStates); // Generate a ton of random numbers across 50 passes. for(i = 0; i < KERNEL_ITERATIONS; i++) { generate_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(devStates, devResults); } // Copy device memory to host. CUDA_CALL(cudaMemcpy(hostResults, devResults, SIMULTANEOUS_THREADS * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // Show result. total = 0; for(i = 0; i < SIMULTANEOUS_THREADS; i++) { total += hostResults[i]; } printf("Fraction with low bit set was %10.13f\n", (float)total / (1.0f * SIMULTANEOUS_THREADS * RANDOMS_PER_ITERATION * KERNEL_ITERATIONS)); /* Cleanup */ CUDA_CALL(cudaFree(devStates)); CUDA_CALL(cudaFree(devResults)); free(hostResults); printf("^^^^ kernel_example PASSED\n"); return EXIT_SUCCESS; }
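The CURAND pair above also illustrates the plain runtime-API renames: cudaMalloc, cudaMemset, cudaMemcpy, cudaFree, cudaSuccess and cudaMemcpyDeviceToHost become their hip* counterparts while the surrounding CUDA_CALL-style error macro keeps its structure. A stand-alone sketch of just that pattern (illustrative only, not part of the sample):

// Hedged sketch of the runtime-API rename pattern; every cuda* name below
// maps to the hip* name visible in the .hip half of the pair above.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// cudaSuccess becomes hipSuccess after hipify; the macro shape is unchanged.
#define CHECK(x) do { if ((x) != cudaSuccess) { \
    printf("Error at %s:%d\n", __FILE__, __LINE__); \
    return EXIT_FAILURE; } } while (0)

int main(void)
{
    const size_t n = 1024;
    unsigned int *d_buf = NULL;
    unsigned int *h_buf = (unsigned int *)calloc(n, sizeof(unsigned int));

    CHECK(cudaMalloc((void **)&d_buf, n * sizeof(unsigned int)));  // hipMalloc
    CHECK(cudaMemset(d_buf, 0, n * sizeof(unsigned int)));         // hipMemset
    CHECK(cudaMemcpy(h_buf, d_buf, n * sizeof(unsigned int),
                     cudaMemcpyDeviceToHost));                     // hipMemcpy, hipMemcpyDeviceToHost
    CHECK(cudaFree(d_buf));                                        // hipFree

    free(h_buf);
    return EXIT_SUCCESS;
}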
f7c9b042c40c2fa5fa0988fea3e1c044f1d50db3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "test_shfl_broadcast_16.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *in = NULL; hipMalloc(&in, XSIZE*YSIZE); int *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( test_shfl_broadcast_16), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( test_shfl_broadcast_16), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( test_shfl_broadcast_16), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f7c9b042c40c2fa5fa0988fea3e1c044f1d50db3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "test_shfl_broadcast_16.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); int *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); test_shfl_broadcast_16<<<gridBlock,threadBlock>>>(in,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { test_shfl_broadcast_16<<<gridBlock,threadBlock>>>(in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { test_shfl_broadcast_16<<<gridBlock,threadBlock>>>(in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
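The benchmarking harness above times kernel launches with std::chrono around a loop plus a device synchronize. An alternative timing idiom uses CUDA events, which HIP provides as the corresponding hipEvent_t / hipEventCreate / hipEventRecord / hipEventElapsedTime calls. A minimal sketch with a toy kernel (not the shfl test itself):

// Event-based timing sketch; busy_kernel is a stand-in for the benchmarked kernel.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_kernel(float* data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        data[i] = data[i] * 2.0f + 1.0f;
    }
}

int main()
{
    const int n = 1 << 20;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) {
        busy_kernel<<<grid, block>>>(d_data, n);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("1000 launches took %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_data);
    return 0;
}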
95ddb16d38362f9ae93a0b416db32b856c7990bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; template <typename T> __global__ void sequence_expand_kernel(const T* x_data, const size_t* x_lod, const size_t* ref_lod, const size_t* offset, const size_t lod_size, /* default=1, the instance length*/ const int x_item_length, T* out_data) { int bid = blockIdx.x; if (bid >= lod_size - 1) return; int x_item_count = x_lod[bid + 1] - x_lod[bid]; int repeats = ref_lod[bid + 1] - ref_lod[bid]; int out_offset = static_cast<int>(offset[bid]); int x_offset = x_lod[bid]; for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { for (int tid_x = threadIdx.x; tid_x < x_item_length; tid_x += blockDim.x) { out_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length + tid_x] = x_data[(x_offset + tid_y) * x_item_length + tid_x]; } } } } template <typename T> __global__ void sequence_expand_grad_kernel( const T* dout_data, const size_t* ref_lod, const size_t* dx_lod, const size_t* offset, const size_t lod_size, /* default=1, the instance length*/ const int x_item_length, T* dx_data) { int bid = blockIdx.x; if (bid >= lod_size - 1) return; int x_item_count = dx_lod[bid + 1] - dx_lod[bid]; int repeats = ref_lod[bid + 1] - ref_lod[bid]; int out_offset = static_cast<int>(offset[bid]); int x_offset = dx_lod[bid]; for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { for (int tid_x = threadIdx.x; tid_x < x_item_length; tid_x += blockDim.x) { platform::CudaAtomicAdd( &dx_data[(x_offset + tid_y) * x_item_length + tid_x], dout_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length + tid_x]); } } } } void GetOutputOffset(const framework::Vector<size_t>& x_lod, const framework::Vector<size_t>& ref_lod, framework::Vector<size_t>* out_offset) { size_t offset = 0; int lod_size = static_cast<int>(x_lod.size()); for (int i = 0; i < static_cast<int>(x_lod.size()); ++i) { (*out_offset)[i] = offset; if (i < lod_size - 1) { offset += (ref_lod[i + 1] - ref_lod[i]) * (x_lod[i + 1] - x_lod[i]); } } } template <typename T> static int ExpandByMemoryCopy(const platform::CUDADeviceContext& context, const LoDTensor& x, LoDTensor* out, const framework::Vector<size_t>& x_lod, const framework::Vector<size_t>& ref_lod, bool do_copy) { auto out_data = out->data<T>(); auto x_data = x.data<T>(); auto& gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); int x_item_length = x.numel() / x.dims()[0]; int out_offset = 0; int num_copys = 0; for (size_t i = 1; i < ref_lod.size(); ++i) { int repeat_num = ref_lod[i] - 
ref_lod[i - 1]; int x_start = x_lod[i - 1]; int x_end = x_lod[i]; int x_seq_len = x_end - x_start; if (repeat_num > 0) { if (do_copy) { int out_start = out_offset; if (out->lod().size() == 1) { out_start = out->lod()[0][out_offset]; } for (int j = 0; j < repeat_num; j++) { for (int k = 0; k < x_seq_len; k++) { memory::Copy( gpu_place, out_data + (out_start + j * x_seq_len + k) * x_item_length, gpu_place, x_data + (x_start + k) * x_item_length, sizeof(T) * x_item_length, context.stream()); } } } else { num_copys += repeat_num * x_seq_len; } } out_offset += repeat_num; } return num_copys; } template <typename T> struct SequenceExpandFunctor<platform::CUDADeviceContext, T> { void operator()( const platform::CUDADeviceContext& context, const LoDTensor& x, const framework::Vector<size_t>& x_lod, /*expand source lod*/ const framework::Vector<size_t>& ref_lod, /*expand referenced lod*/ LoDTensor* out) { int num_copys = ExpandByMemoryCopy<T>(context, x, out, x_lod, ref_lod, false); // Sometimes direct copies will be faster, this maybe need deeply analysis. if (num_copys < 5) { ExpandByMemoryCopy<T>(context, x, out, x_lod, ref_lod, true); } else { int x_item_length = x.numel() / x.dims()[0]; size_t x_lod_size = x_lod.size(); framework::Vector<size_t> out_offset(x_lod_size * 2 + ref_lod.size()); GetOutputOffset(x_lod, ref_lod, &out_offset); for (size_t i = 0; i < x_lod_size; ++i) { out_offset[x_lod_size + i] = x_lod[i]; } for (size_t i = 0; i < ref_lod.size(); ++i) { out_offset[2 * x_lod_size + i] = ref_lod[i]; } const size_t* out_offset_data = out_offset.CUDAData(context.GetPlace()); const size_t* x_lod_data = out_offset_data + x_lod_size; const size_t* ref_lod_data = out_offset_data + 2 * x_lod_size; int thread_x = ::min(32, ::max(static_cast<int>(ref_lod.size()), 16)); int thread_y = 16; int thread_z = 1024 / thread_x / thread_y; int block_x = static_cast<int>(ref_lod.size()); dim3 block_size(thread_x, thread_y, thread_z); dim3 grid_size(block_x, 1); hipLaunchKernelGGL(( sequence_expand_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(), x.data<T>(), x_lod_data, ref_lod_data, out_offset_data, x_lod_size, x_item_length, out->mutable_data<T>(context.GetPlace())); } } }; template <typename T> struct SequenceExpandGradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const LoDTensor& dout, const framework::Vector<size_t>& x_lod, /*expand source lod*/ const framework::Vector<size_t>& ref_lod, /*expand based lod*/ LoDTensor* dx) { int x_item_length = framework::product(dx->dims()) / dx->dims()[0]; framework::Vector<size_t> out_offset(x_lod.size()); GetOutputOffset(x_lod, ref_lod, &out_offset); int thread_x = ::min(32, ::max(static_cast<int>(ref_lod.size()), 16)); int thread_y = 16; int thread_z = 1024 / thread_x / thread_y; int block_x = static_cast<int>(ref_lod.size()); dim3 block_size(thread_x, thread_y, thread_z); dim3 grid_size(block_x, 1); hipLaunchKernelGGL(( sequence_expand_grad_kernel), dim3(grid_size), dim3(block_size), 0, context.stream(), dout.data<T>(), ref_lod.CUDAData(context.GetPlace()), x_lod.CUDAData(context.GetPlace()), out_offset.CUDAData(context.GetPlace()), ref_lod.size(), x_item_length, dx->mutable_data<T>(context.GetPlace())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( sequence_expand, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, double>, 
ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( sequence_expand_grad, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
95ddb16d38362f9ae93a0b416db32b856c7990bb.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; template <typename T> __global__ void sequence_expand_kernel(const T* x_data, const size_t* x_lod, const size_t* ref_lod, const size_t* offset, const size_t lod_size, /* default=1, the instance length*/ const int x_item_length, T* out_data) { int bid = blockIdx.x; if (bid >= lod_size - 1) return; int x_item_count = x_lod[bid + 1] - x_lod[bid]; int repeats = ref_lod[bid + 1] - ref_lod[bid]; int out_offset = static_cast<int>(offset[bid]); int x_offset = x_lod[bid]; for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { for (int tid_x = threadIdx.x; tid_x < x_item_length; tid_x += blockDim.x) { out_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length + tid_x] = x_data[(x_offset + tid_y) * x_item_length + tid_x]; } } } } template <typename T> __global__ void sequence_expand_grad_kernel( const T* dout_data, const size_t* ref_lod, const size_t* dx_lod, const size_t* offset, const size_t lod_size, /* default=1, the instance length*/ const int x_item_length, T* dx_data) { int bid = blockIdx.x; if (bid >= lod_size - 1) return; int x_item_count = dx_lod[bid + 1] - dx_lod[bid]; int repeats = ref_lod[bid + 1] - ref_lod[bid]; int out_offset = static_cast<int>(offset[bid]); int x_offset = dx_lod[bid]; for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { for (int tid_x = threadIdx.x; tid_x < x_item_length; tid_x += blockDim.x) { platform::CudaAtomicAdd( &dx_data[(x_offset + tid_y) * x_item_length + tid_x], dout_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length + tid_x]); } } } } void GetOutputOffset(const framework::Vector<size_t>& x_lod, const framework::Vector<size_t>& ref_lod, framework::Vector<size_t>* out_offset) { size_t offset = 0; int lod_size = static_cast<int>(x_lod.size()); for (int i = 0; i < static_cast<int>(x_lod.size()); ++i) { (*out_offset)[i] = offset; if (i < lod_size - 1) { offset += (ref_lod[i + 1] - ref_lod[i]) * (x_lod[i + 1] - x_lod[i]); } } } template <typename T> static int ExpandByMemoryCopy(const platform::CUDADeviceContext& context, const LoDTensor& x, LoDTensor* out, const framework::Vector<size_t>& x_lod, const framework::Vector<size_t>& ref_lod, bool do_copy) { auto out_data = out->data<T>(); auto x_data = x.data<T>(); auto& gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); int x_item_length = x.numel() / x.dims()[0]; int out_offset = 0; int num_copys = 0; for (size_t i = 1; i < ref_lod.size(); ++i) { int repeat_num = ref_lod[i] - ref_lod[i - 1]; int x_start = x_lod[i - 1]; int x_end = x_lod[i]; int x_seq_len = x_end - 
x_start; if (repeat_num > 0) { if (do_copy) { int out_start = out_offset; if (out->lod().size() == 1) { out_start = out->lod()[0][out_offset]; } for (int j = 0; j < repeat_num; j++) { for (int k = 0; k < x_seq_len; k++) { memory::Copy( gpu_place, out_data + (out_start + j * x_seq_len + k) * x_item_length, gpu_place, x_data + (x_start + k) * x_item_length, sizeof(T) * x_item_length, context.stream()); } } } else { num_copys += repeat_num * x_seq_len; } } out_offset += repeat_num; } return num_copys; } template <typename T> struct SequenceExpandFunctor<platform::CUDADeviceContext, T> { void operator()( const platform::CUDADeviceContext& context, const LoDTensor& x, const framework::Vector<size_t>& x_lod, /*expand source lod*/ const framework::Vector<size_t>& ref_lod, /*expand referenced lod*/ LoDTensor* out) { int num_copys = ExpandByMemoryCopy<T>(context, x, out, x_lod, ref_lod, false); // Sometimes direct copies will be faster, this maybe need deeply analysis. if (num_copys < 5) { ExpandByMemoryCopy<T>(context, x, out, x_lod, ref_lod, true); } else { int x_item_length = x.numel() / x.dims()[0]; size_t x_lod_size = x_lod.size(); framework::Vector<size_t> out_offset(x_lod_size * 2 + ref_lod.size()); GetOutputOffset(x_lod, ref_lod, &out_offset); for (size_t i = 0; i < x_lod_size; ++i) { out_offset[x_lod_size + i] = x_lod[i]; } for (size_t i = 0; i < ref_lod.size(); ++i) { out_offset[2 * x_lod_size + i] = ref_lod[i]; } const size_t* out_offset_data = out_offset.CUDAData(context.GetPlace()); const size_t* x_lod_data = out_offset_data + x_lod_size; const size_t* ref_lod_data = out_offset_data + 2 * x_lod_size; int thread_x = std::min(32, std::max(static_cast<int>(ref_lod.size()), 16)); int thread_y = 16; int thread_z = 1024 / thread_x / thread_y; int block_x = static_cast<int>(ref_lod.size()); dim3 block_size(thread_x, thread_y, thread_z); dim3 grid_size(block_x, 1); sequence_expand_kernel<<<grid_size, block_size, 0, context.stream()>>>( x.data<T>(), x_lod_data, ref_lod_data, out_offset_data, x_lod_size, x_item_length, out->mutable_data<T>(context.GetPlace())); } } }; template <typename T> struct SequenceExpandGradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const LoDTensor& dout, const framework::Vector<size_t>& x_lod, /*expand source lod*/ const framework::Vector<size_t>& ref_lod, /*expand based lod*/ LoDTensor* dx) { int x_item_length = framework::product(dx->dims()) / dx->dims()[0]; framework::Vector<size_t> out_offset(x_lod.size()); GetOutputOffset(x_lod, ref_lod, &out_offset); int thread_x = std::min(32, std::max(static_cast<int>(ref_lod.size()), 16)); int thread_y = 16; int thread_z = 1024 / thread_x / thread_y; int block_x = static_cast<int>(ref_lod.size()); dim3 block_size(thread_x, thread_y, thread_z); dim3 grid_size(block_x, 1); sequence_expand_grad_kernel<<<grid_size, block_size, 0, context.stream()>>>( dout.data<T>(), ref_lod.CUDAData(context.GetPlace()), x_lod.CUDAData(context.GetPlace()), out_offset.CUDAData(context.GetPlace()), ref_lod.size(), x_item_length, dx->mutable_data<T>(context.GetPlace())); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( sequence_expand, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandKernel<paddle::platform::CUDADeviceContext, int64_t>); 
REGISTER_OP_CUDA_KERNEL( sequence_expand_grad, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, float>, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, double>, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, int>, ops::SequenceExpandGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
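Beyond the usual renames, this pair shows two patterns worth noting: in this file hipify rewrites std::min/std::max to ::min/::max, and the explicit stream argument of <<<grid, block, 0, stream>>> is carried into the hipLaunchKernelGGL call. A small stand-alone sketch of the stream-plus-3D-block launch shape (toy kernel and hypothetical names, not the Paddle operator):

// Sketch of a 3D thread-block launch on an explicit stream, mirroring the
// thread_x/thread_y/thread_z layout used by sequence_expand_kernel.
#include <algorithm>
#include <cuda_runtime.h>

__global__ void fill_kernel(float* out, int repeats, int items, int item_length)
{
    // z/y/x threads stride over repeats, items and item elements, as above.
    for (int z = threadIdx.z; z < repeats; z += blockDim.z) {
        for (int y = threadIdx.y; y < items; y += blockDim.y) {
            for (int x = threadIdx.x; x < item_length; x += blockDim.x) {
                out[(z * items + y) * item_length + x] = 1.0f;
            }
        }
    }
}

int main()
{
    const int repeats = 4, items = 100, item_length = 64;
    float* d_out = nullptr;
    cudaMalloc(&d_out, repeats * items * item_length * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    int thread_x = std::min(32, std::max(item_length, 16));  // ::min / ::max in the .hip half
    int thread_y = 16;
    int thread_z = 1024 / thread_x / thread_y;
    dim3 block_size(thread_x, thread_y, thread_z);
    dim3 grid_size(1, 1);

    // CUDA launch on an explicit stream; the hipify output keeps the stream as
    // the fifth hipLaunchKernelGGL argument.
    fill_kernel<<<grid_size, block_size, 0, stream>>>(d_out, repeats, items, item_length);
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    cudaFree(d_out);
    return 0;
}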
5f5f8dabf9b84e50f222965baf02cde8b8f11914.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #include <thrust/tuple.h> #include "include/hip/hip_fp16.h" #include "include/cuda_runtime.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/list_diff_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" struct is_selected { __host__ __device__ bool operator()(const bool x) { return x == false; } }; template <typename T, typename S> int CalListDiff(size_t x_size, size_t y_size, const T *x, const T *y, T *out, S *idx, T *workspace_y, S *workspace_xidx, bool *workspace_flag, const uint32_t &device_id, hipStream_t cuda_stream) { int count_out = 0; auto policy = thrust::hip::par.on(cuda_stream); hipMemcpy(workspace_y, y, y_size * sizeof(T), hipMemcpyDeviceToDevice); thrust::sequence(policy, thrust::device_pointer_cast(workspace_xidx), thrust::device_pointer_cast(workspace_xidx) + x_size); thrust::stable_sort(policy, thrust::device_pointer_cast(workspace_y), thrust::device_pointer_cast(workspace_y) + y_size); thrust::binary_search(thrust::device_pointer_cast(workspace_y), thrust::device_pointer_cast(workspace_y) + y_size, thrust::device_pointer_cast(x), thrust::device_pointer_cast(x) + x_size, thrust::device_pointer_cast(workspace_flag)); count_out = thrust::count(policy, thrust::device_pointer_cast(workspace_flag), thrust::device_pointer_cast(workspace_flag) + x_size, false); thrust::copy_if( policy, thrust::make_zip_iterator( thrust::make_tuple(thrust::device_pointer_cast(workspace_xidx), thrust::device_pointer_cast(x))), thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(workspace_xidx) + x_size, thrust::device_pointer_cast(x) + x_size)), thrust::device_pointer_cast(workspace_flag), thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(idx), thrust::device_pointer_cast(out))), is_selected()); return count_out; } template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const half *, const half *, half *, int64_t *, half *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const float *, const float *, float *, int64_t *, float *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const double *, const double *, double *, int64_t *, double *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint8_t *, const uint8_t *, uint8_t *, int64_t *, uint8_t *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint16_t *, 
const uint16_t *, uint16_t *, int64_t *, uint16_t *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int8_t *, const int8_t *, int8_t *, int64_t *, int8_t *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int16_t *, const int16_t *, int16_t *, int64_t *, int16_t *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int32_t *, const int32_t *, int32_t *, int64_t *, int32_t *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int64_t *, const int64_t *, int64_t *, int64_t *, int64_t *, int64_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const half *, const half *, half *, int32_t *, half *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const float *, const float *, float *, int32_t *, float *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const double *, const double *, double *, int32_t *, double *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint8_t *, const uint8_t *, uint8_t *, int32_t *, uint8_t *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint16_t *, const uint16_t *, uint16_t *, int32_t *, uint16_t *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int8_t *, const int8_t *, int8_t *, int32_t *, int8_t *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int16_t *, const int16_t *, int16_t *, int32_t *, int16_t *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int32_t *, const int32_t *, int32_t *, int32_t *, int32_t *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int64_t *, const int64_t *, int64_t *, int32_t *, int64_t *, int32_t *, bool *, const uint32_t &, hipStream_t cuda_stream);
5f5f8dabf9b84e50f222965baf02cde8b8f11914.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #include <thrust/tuple.h> #include "include/cuda_fp16.h" #include "include/cuda_runtime.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/list_diff_impl.cuh" #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" struct is_selected { __host__ __device__ bool operator()(const bool x) { return x == false; } }; template <typename T, typename S> int CalListDiff(size_t x_size, size_t y_size, const T *x, const T *y, T *out, S *idx, T *workspace_y, S *workspace_xidx, bool *workspace_flag, const uint32_t &device_id, cudaStream_t cuda_stream) { int count_out = 0; auto policy = thrust::cuda::par.on(cuda_stream); cudaMemcpy(workspace_y, y, y_size * sizeof(T), cudaMemcpyDeviceToDevice); thrust::sequence(policy, thrust::device_pointer_cast(workspace_xidx), thrust::device_pointer_cast(workspace_xidx) + x_size); thrust::stable_sort(policy, thrust::device_pointer_cast(workspace_y), thrust::device_pointer_cast(workspace_y) + y_size); thrust::binary_search(thrust::device_pointer_cast(workspace_y), thrust::device_pointer_cast(workspace_y) + y_size, thrust::device_pointer_cast(x), thrust::device_pointer_cast(x) + x_size, thrust::device_pointer_cast(workspace_flag)); count_out = thrust::count(policy, thrust::device_pointer_cast(workspace_flag), thrust::device_pointer_cast(workspace_flag) + x_size, false); thrust::copy_if( policy, thrust::make_zip_iterator( thrust::make_tuple(thrust::device_pointer_cast(workspace_xidx), thrust::device_pointer_cast(x))), thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(workspace_xidx) + x_size, thrust::device_pointer_cast(x) + x_size)), thrust::device_pointer_cast(workspace_flag), thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(idx), thrust::device_pointer_cast(out))), is_selected()); return count_out; } template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const half *, const half *, half *, int64_t *, half *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const float *, const float *, float *, int64_t *, float *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const double *, const double *, double *, int64_t *, double *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint8_t *, const uint8_t *, uint8_t *, int64_t *, uint8_t *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint16_t *, const uint16_t *, uint16_t *, int64_t *, uint16_t *, 
int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int8_t *, const int8_t *, int8_t *, int64_t *, int8_t *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int16_t *, const int16_t *, int16_t *, int64_t *, int16_t *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int32_t *, const int32_t *, int32_t *, int64_t *, int32_t *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int64_t *, const int64_t *, int64_t *, int64_t *, int64_t *, int64_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const half *, const half *, half *, int32_t *, half *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const float *, const float *, float *, int32_t *, float *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const double *, const double *, double *, int32_t *, double *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint8_t *, const uint8_t *, uint8_t *, int32_t *, uint8_t *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const uint16_t *, const uint16_t *, uint16_t *, int32_t *, uint16_t *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int8_t *, const int8_t *, int8_t *, int32_t *, int8_t *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int16_t *, const int16_t *, int16_t *, int32_t *, int16_t *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int32_t *, const int32_t *, int32_t *, int32_t *, int32_t *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT int CalListDiff(size_t, size_t, const int64_t *, const int64_t *, int64_t *, int32_t *, int64_t *, int32_t *, bool *, const uint32_t &, cudaStream_t cuda_stream);
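The list_diff pair keeps every Thrust call verbatim; the only Thrust-specific change is the execution policy, thrust::cuda::par.on(stream) versus thrust::hip::par.on(stream). A minimal stand-alone sketch of sorting and counting on an explicit stream (illustrative only, not the ListDiff kernel):

// Thrust-on-a-stream sketch; the policy line is the part hipify rewrites.
#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

struct below_100 {
    __host__ __device__ bool operator()(int v) const { return v < 100; }
};

int main()
{
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // thrust::cuda::par.on(stream) becomes thrust::hip::par.on(stream) after hipify.
    auto policy = thrust::cuda::par.on(stream);

    thrust::device_vector<int> values(1000);
    thrust::sequence(policy, values.begin(), values.end());  // 0, 1, ..., 999
    thrust::sort(policy, values.begin(), values.end(), thrust::greater<int>());
    int count = thrust::count_if(policy, values.begin(), values.end(), below_100());

    cudaStreamSynchronize(stream);
    printf("values below 100: %d\n", count);  // expects 100

    cudaStreamDestroy(stream);
    return 0;
}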
TRBVH.hip
// !!! This is a file automatically generated by hipify!!! #include "TRBVH.h" #include <hip/hip_runtime_api.h> #include <math_functions.h> #include <device_launch_parameters.h> #include "Commons.cuh" #include "TimeKernelExecution.h" #include "Treelet.cuh" #include <cfloat> #define WARP_SIZE 32 #define GLOBAL_WARP_INDEX static_cast<int>((threadIdx.x + blockIdx.x * blockDim.x) / WARP_SIZE) #define WARP_INDEX static_cast<int>(threadIdx.x / WARP_SIZE) #define WARP_ARRAY(source, elementsPerWarp) ((source) + WARP_INDEX * (elementsPerWarp)) #define WARP_ARRAY_INDEX(index, elementsPerWarp) (WARP_INDEX * (elementsPerWarp) + (index)) #define THREAD_WARP_INDEX (threadIdx.x & (WARP_SIZE - 1)) namespace BVHRT { __device__ void reduceOptimal(float& optimalCost, int& optimalMask, int numberOfValues) { for (int i = numberOfValues >> 1; i > 0; i = (i >> 1)) { float otherValue = __shfl_down(optimalCost, i); int otherMask = __shfl_down(optimalMask, i); if (otherValue < optimalCost) { optimalCost = otherValue; optimalMask = otherMask; } } } __device__ void calculateSubsetSurfaceAreas(int treeletSize, BVHTree* tree, int* treeletLeaves, float* subsetAreas, float4* boundingBoxesMin, float4* boundingBoxesMax, float* costs) { float bbMin[3], bbMax[3]; if (THREAD_WARP_INDEX < treeletSize) { floatArrayFromFloat4(tree->BoundingBoxMin(treeletLeaves[THREAD_WARP_INDEX]), bbMin); floatArrayFromFloat4(tree->BoundingBoxMax(treeletLeaves[THREAD_WARP_INDEX]), bbMax); } // The 5 most significative bits are common ammong the thread's subsets int subset = THREAD_WARP_INDEX * 4; float3 baseMin, baseMax; baseMin.x = FLT_MAX; baseMin.y = FLT_MAX; baseMin.z = FLT_MAX; baseMax.x = -FLT_MAX; baseMax.y = -FLT_MAX; baseMax.z = -FLT_MAX; for (int i = (treeletSize - 5); i < treeletSize; ++i) { float3 leafBbMin, leafBbMax; SHFL_FLOAT3(leafBbMin, bbMin, i); SHFL_FLOAT3(leafBbMax, bbMax, i); if (subset & (1 << i)) { expandBoundingBox(baseMin, baseMax, leafBbMin, leafBbMax); } } int iterations = max(1, 1 << (treeletSize - 5)); // Num elements / 32, rounded up for (int j = 0; j < iterations; ++j) { float3 subsetMin, subsetMax; subsetMin.x = baseMin.x; subsetMin.y = baseMin.y; subsetMin.z = baseMin.z; subsetMax.x = baseMax.x; subsetMax.y = baseMax.y; subsetMax.z = baseMax.z; for (int i = 0; i < (treeletSize - 5); ++i) { float3 leafBbMin, leafBbMax; SHFL_FLOAT3(leafBbMin, bbMin, i); SHFL_FLOAT3(leafBbMax, bbMax, i); if (subset & (1 << i)) { expandBoundingBox(subsetMin, subsetMax, leafBbMin, leafBbMax); } } // Store bounding boxes and their surface areas int position = (1 << treeletSize) * GLOBAL_WARP_INDEX + subset; boundingBoxesMin[position] = float4FromFloat3(subsetMin); boundingBoxesMax[position] = float4FromFloat3(subsetMax); float subsetArea = calculateBoundingBoxSurfaceArea(subsetMin, subsetMax); subsetAreas[position] = subsetArea; costs[subset] = subsetArea; ++subset; } } __device__ void processSchedule(int numberOfRounds, int* schedule, float* costs, char* partitionMasks, int treeletTriangles, float ci, float ct) { for (int j = 0; j < numberOfRounds; ++j) { int subset = schedule[THREAD_WARP_INDEX + j * WARP_SIZE]; if (subset != 0) { // Process all possible partitions of the subset float optimalCost = FLT_MAX; int optimalPartition = 0; int delta = (subset - 1) & subset; int partition = (-delta) & subset; float partitionCost; while (partition != 0) { partitionCost = costs[partition] + costs[partition ^ subset]; if (partitionCost < optimalCost) { optimalCost = partitionCost; optimalPartition = partition; } partition = (partition - delta) & 
subset; } // Calculate subset SAH. Keep whichever has a lower SAH between collapsing // the subset treelet or leaving it as is costs[subset] = min(ci * costs[subset] + optimalCost, ct * costs[subset] * treeletTriangles); partitionMasks[subset] = static_cast<char>(optimalPartition); } WARP_SYNC; } } __device__ void processSubsets(int treeletSize, int treeletTriangles, float* costs, char* partitionMasks, float ci, float ct) { // Process subsets of size treeletSize-1. Each 4 threads will process a subset. There // are treeletSize subsets. if (THREAD_WARP_INDEX < 4 * treeletSize) { // To find the nth subset of treeletSize-1 elements, start with a sequence of // treeletSize ones and set the nth bit to 0 int subset = ((1 << treeletSize) - 1) & (~(1 << (THREAD_WARP_INDEX / 4))); // To assemble the partitions of nth subset of size treeletSize-1, we create a // mask to split that subset before the (n-1)th least significant bit. We then // get the left part of the masked base number and shift left by one (thus adding // the partition's 0). The last step is to OR the result with the right part of // the masked number and shift one more time to set the least significant bit to // 0. Below is an example for a treelet size of 7: // subset = 1110111 (7 bits) // base = abcde (5 bits) // partition = abc0de0 (7 bits) // The cast to int is required so max does not return the wrong value int leftMask = -(1 << max(static_cast<int>((THREAD_WARP_INDEX / 4) - 1), 0)); // x & 3 == x % 4 int partitionBase = (THREAD_WARP_INDEX & 3) + 1; float optimalCost = FLT_MAX; int optimalPartition = 0; int numberOfPartitions = (1 << (treeletSize - 2)) - 1; int partition = (((partitionBase & leftMask) << 1) | (partitionBase & ~leftMask)) << 1; for (int j = (THREAD_WARP_INDEX & 3); j < numberOfPartitions; j += 4) { float partitionCost = costs[partition] + costs[partition ^ subset]; if (partitionCost < optimalCost) { optimalCost = partitionCost; optimalPartition = partition; } partitionBase += 4; partition = (((partitionBase & leftMask) << 1) | (partitionBase & ~leftMask)) << 1; } reduceOptimal(optimalCost, optimalPartition, 4); if ((THREAD_WARP_INDEX & 3) == 0) { // Calculate subset SAH. Keep whichever has a lower SAH between collapsing // the subset treelet or leaving it as is costs[subset] = min(ci * costs[subset] + optimalCost, ct * costs[subset] * treeletTriangles); partitionMasks[subset] = static_cast<char>(optimalPartition); } } WARP_SYNC; // Process subsets of size treeletSize float optimalCost = FLT_MAX; int optimalPartition = 0; int subset = (1 << treeletSize) - 1; int partition = (THREAD_WARP_INDEX + 1) * 2; int numberOfPartitions = (1 << (treeletSize - 1)) - 1; for (int j = THREAD_WARP_INDEX; j < numberOfPartitions; j += 32) { float partitionCost = costs[partition] + costs[partition ^ subset]; if (partitionCost < optimalCost) { optimalCost = partitionCost; optimalPartition = partition; } partition += 64; } reduceOptimal(optimalCost, optimalPartition, WARP_SIZE); if (THREAD_WARP_INDEX == 0) { // Calculate subset SAH. 
Keep whichever has a lower SAH between collapsing // the subset treelet or leaving it as is costs[subset] = min(ci * costs[subset] + optimalCost, ct * costs[subset] * treeletTriangles); partitionMasks[subset] = static_cast<char>(optimalPartition); } } __device__ void updateTreelet(int treeletSize, BVHTree* tree, int* treeletInternalNodes, int* treeletLeaves, float* subsetAreas, float* costs, char* partitionMasks, float* nodesSah, float4* boundingBoxesMin, float4* boundingBoxesMax, int* stackNode, char* stackMask, int* stackSize, int* currentInternalNode) { int globalWarpIndex = GLOBAL_WARP_INDEX; if (costs[(1 << treeletSize) - 1] < nodesSah[treeletInternalNodes[0]]) { if (THREAD_WARP_INDEX == 0) { stackNode[globalWarpIndex * (treeletSize - 1)] = treeletInternalNodes[0]; stackMask[globalWarpIndex * (treeletSize - 1)] = static_cast<char>((1 << treeletSize) - 1); stackSize[globalWarpIndex] = 1; currentInternalNode[globalWarpIndex] = 1; } while (stackSize[globalWarpIndex] > 0) { int lastStackSize = stackSize[globalWarpIndex]; if (THREAD_WARP_INDEX == 0) { stackSize[globalWarpIndex] = 0; } if (THREAD_WARP_INDEX < lastStackSize) { int nodeSubset = stackMask [globalWarpIndex * (treeletSize - 1) + THREAD_WARP_INDEX]; char partition = partitionMasks[nodeSubset]; char partitionComplement = partition ^ nodeSubset; int subsetRoot = stackNode[globalWarpIndex * (treeletSize - 1) + THREAD_WARP_INDEX]; int childIndex; if (__popc(partition) > 1) { // Update node pointers int currentNode = atomicAdd(currentInternalNode + globalWarpIndex, 1); childIndex = treeletInternalNodes[currentNode]; tree->SetLeftIndex(subsetRoot, childIndex); tree->SetParentIndex(childIndex, subsetRoot); int position = (1 << treeletSize) * globalWarpIndex + partition; float4 bbMin = boundingBoxesMin[position]; float4 bbMax = boundingBoxesMax[position]; float area = calculateBoundingBoxSurfaceArea(bbMin, bbMax); // Update node area and bounding box tree->SetBoundingBoxMin(childIndex, bbMin); tree->SetBoundingBoxMax(childIndex, bbMax); tree->SetArea(childIndex, area); nodesSah[childIndex] = costs[partition]; // Add child to stack int stackIndex = atomicAdd(stackSize + globalWarpIndex, 1); stackNode[globalWarpIndex * (treeletSize - 1) + stackIndex] = childIndex; stackMask[globalWarpIndex * (treeletSize - 1) + stackIndex] = partition; } else { childIndex = treeletLeaves[__ffs(partition) - 1]; tree->SetLeftIndex(subsetRoot, childIndex); tree->SetParentIndex(childIndex, subsetRoot); } if (__popc(partitionComplement) > 1) { // Update node pointers int currentNode = atomicAdd(currentInternalNode + globalWarpIndex, 1); int childIndex = treeletInternalNodes[currentNode]; tree->SetRightIndex(subsetRoot, childIndex); tree->SetParentIndex(childIndex, subsetRoot); int position = (1 << treeletSize) * globalWarpIndex + partitionComplement; float4 bbMin = boundingBoxesMin[position]; float4 bbMax = boundingBoxesMax[position]; float area = calculateBoundingBoxSurfaceArea(bbMin, bbMax); // Update node area and bounding box tree->SetBoundingBoxMin(childIndex, bbMin); tree->SetBoundingBoxMax(childIndex, bbMax); tree->SetArea(childIndex, area); nodesSah[childIndex] = costs[partition]; // Add child to stack int stackIndex = atomicAdd(stackSize + globalWarpIndex, 1); stackNode[globalWarpIndex * (treeletSize - 1) + stackIndex] = childIndex; stackMask[globalWarpIndex * (treeletSize - 1) + stackIndex] = partitionComplement; } else { int childIndex = treeletLeaves[__ffs(partitionComplement) - 1]; tree->SetRightIndex(subsetRoot, childIndex); 
tree->SetParentIndex(childIndex, subsetRoot); } } } } } __global__ void LAUNCH_BOUNDS(128, 12) treeletReestructureKernel(unsigned int numberOfTriangles, BVHTree* tree, float* nodesSah, int treeletSize, int* subtreeTriangles, unsigned int* counters, int gamma, int* schedule, int numberOfRounds, float4* boundingBoxesMin, float4* boundingBoxesMax, float* subsetAreas, int* stackNode, char* stackMask, int* stackSize, int* currentInternalNode, float ci, float ct) { // Split the pre-allocated shared memory into distinct arrays for our treelet extern __shared__ int sharedMemory[]; __shared__ int* treeletInternalNodes; __shared__ int* treeletLeaves; __shared__ float* treeletLeavesAreas; __shared__ float* costs; __shared__ char* partitionMasks; // Having only the first thread perform this assignments and then // synchronizing is actually slower than issuing the assignments on all threads int numberOfWarps = blockDim.x / WARP_SIZE; if (THREAD_WARP_INDEX == 0) { treeletInternalNodes = sharedMemory; treeletLeaves = treeletInternalNodes + (treeletSize - 1) * numberOfWarps; treeletLeavesAreas = (float*)(treeletLeaves + treeletSize * numberOfWarps); costs = treeletLeavesAreas + treeletSize * numberOfWarps; partitionMasks = (char*)(costs + (1 << treeletSize) * numberOfWarps); } __syncthreads(); // If this flag is set, the thread will be excluded from the bottom up traversal, but will // still be available to help form and optimiza treelets const int threadIndex = threadIdx.x + blockIdx.x * blockDim.x; // Initialize leaves int currentNodeIndex; if (threadIndex < numberOfTriangles) { int leafIndex = threadIndex + numberOfTriangles - 1; float area = tree->Area(leafIndex); currentNodeIndex = tree->ParentIndex(leafIndex); subtreeTriangles[leafIndex] = 1; nodesSah[leafIndex] = ct * area; } else { currentNodeIndex = -1; } while (__ballot(currentNodeIndex >= 0) != 0) { // Number of threads who already have processed the current node unsigned int counter = 0; if (currentNodeIndex >= 0) { counter = atomicAdd(&counters[currentNodeIndex], 1); // Only the last thread to arrive is allowed to process the current node. This ensures // that both its children will already have been processed if (counter == 0) { currentNodeIndex = -1; } } // How many triangles can be reached by the subtree with root at the current node int triangleCount = 0; if (counter != 0) { // Throughout the code, blocks that have loads separated from stores are so organized // in order to increase ILP (Instruction level parallelism) int left = tree->LeftIndex(currentNodeIndex); int right = tree->RightIndex(currentNodeIndex); float area = tree->Area(currentNodeIndex); int trianglesLeft = subtreeTriangles[left]; float sahLeft = nodesSah[left]; int trianglesRight = subtreeTriangles[right]; float sahRight = nodesSah[right]; triangleCount = trianglesLeft + trianglesRight; subtreeTriangles[currentNodeIndex] = triangleCount; nodesSah[currentNodeIndex] = ci * area + sahLeft + sahRight; } // Check which threads in the warp have treelets to be processed. 
We are only going to // process a treelet if the current node is the root of a subtree with at least gamma // triangles unsigned int vote = __ballot(triangleCount >= gamma); while (vote != 0) { // Get the thread index for the treelet that will be processed int rootThreadIndex = __ffs(vote) - 1; // Get the treelet root by reading the corresponding thread's currentNodeIndex private // variable int treeletRootIndex = __shfl(currentNodeIndex, rootThreadIndex); formTreelet(treeletRootIndex, numberOfTriangles, tree, treeletSize, WARP_ARRAY(treeletInternalNodes, treeletSize - 1), WARP_ARRAY(treeletLeaves, treeletSize), WARP_ARRAY(treeletLeavesAreas, treeletSize)); // Optimize treelet calculateSubsetSurfaceAreas(treeletSize, tree, WARP_ARRAY(treeletLeaves, treeletSize), subsetAreas, boundingBoxesMin, boundingBoxesMax, WARP_ARRAY(costs, (1 << treeletSize))); // Set leaves cost if (THREAD_WARP_INDEX < treeletSize) { int leafIndex = WARP_ARRAY_INDEX(1 << THREAD_WARP_INDEX, 1 << treeletSize); int treeletLeafIndex = WARP_ARRAY_INDEX(THREAD_WARP_INDEX, treeletSize); costs[leafIndex] = nodesSah[treeletLeaves[treeletLeafIndex]]; } WARP_SYNC; int treeletTriangles = subtreeTriangles[treeletInternalNodes[WARP_ARRAY_INDEX(0, treeletSize - 1)]]; // Process subsets of sizes 2 to treeletSize-2 using the schedule processSchedule(numberOfRounds, schedule, WARP_ARRAY(costs, (1 << treeletSize)), WARP_ARRAY(partitionMasks, (1 << treeletSize)), treeletTriangles, ci, ct); WARP_SYNC; // Procecss remaining subsets processSubsets(treeletSize, treeletTriangles, WARP_ARRAY(costs, (1 << treeletSize)), WARP_ARRAY(partitionMasks, (1 << treeletSize)), ci, ct); WARP_SYNC; updateTreelet(treeletSize, tree, WARP_ARRAY(treeletInternalNodes, treeletSize - 1), WARP_ARRAY(treeletLeaves, treeletSize), subsetAreas, WARP_ARRAY(costs, (1 << treeletSize)), WARP_ARRAY(partitionMasks, (1 << treeletSize)), nodesSah, boundingBoxesMin, boundingBoxesMax, stackNode, stackMask, stackSize, currentInternalNode); // Update vote so each treelet is only processed once (set the bit that represents the // treelet that will be processed back to 0) vote &= ~(1 << rootThreadIndex); } // Update current node pointer if (currentNodeIndex >= 0) { currentNodeIndex = tree->ParentIndex(currentNodeIndex); } } } float DeviceTreeletReestructureOptimizer(unsigned int numberOfTriangles, BVHTree* tree, unsigned int* counters, int* subtreeTrianglesCount, float* nodesSah, int treeletSize, int gamma, int* schedule, int numberOfRounds, float4* boundingBoxesMin, float4* boundingBoxesMax, float* subsetAreas, int* stackNode, char* stackMask, int* stackSize, int* currentInternalNode, float ci, float ct) { dim3 blockSize(128, 1, 1); dim3 gridSize((numberOfTriangles + (blockSize.x - 1)) / blockSize.x, 1, 1); size_t treeletMemorySize = static_cast<size_t>((2 * treeletSize - 1) * sizeof(int) + treeletSize * sizeof(float)); size_t costAndMaskSize = static_cast<size_t>( (1 << treeletSize) * sizeof(float) + (1 << treeletSize) * sizeof(char)); size_t sharedMemorySize = static_cast<size_t>( (treeletMemorySize + costAndMaskSize) * (blockSize.x / 32)); hipFuncSetCacheConfig(treeletReestructureKernel, hipFuncCachePreferShared); return TimeKernelExecution([&]() { hipLaunchKernelGGL(( treeletReestructureKernel), dim3(gridSize), dim3(blockSize), sharedMemorySize, 0, numberOfTriangles, tree, nodesSah, treeletSize, subtreeTrianglesCount, counters, gamma, schedule, numberOfRounds, boundingBoxesMin, boundingBoxesMax, subsetAreas, stackNode, stackMask, stackSize, currentInternalNode, ci, ct); }); } 
}
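A note on the traversal pattern in the treeletReestructureKernel above: the kernel walks the BVH bottom-up by giving each leaf a thread that climbs toward the root, and at every internal node an atomicAdd on a per-node counter decides who proceeds. The first thread to arrive reads 0 and stops; the second thread knows both child subtrees are already finished and carries on. The sketch below isolates just that arrival-counting rule with hypothetical names (parent[], counters[], firstLeaf); unlike the real kernel, it simply returns instead of keeping finished threads inside the __ballot loop so they can help with warp-wide treelet optimization.

#include <cuda_runtime.h>

// Hypothetical layout: parent[i] is the parent of node i, -1 at the root;
// leaves occupy indices firstLeaf .. firstLeaf + numberOfLeaves - 1;
// counters[] is assumed zero-initialized before the launch.
__global__ void bottomUpArrivalSketch(const int* parent, unsigned int* counters,
                                      int numberOfLeaves, int firstLeaf)
{
    int thread = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread >= numberOfLeaves) return;

    int node = parent[firstLeaf + thread];   // start at the leaf's parent
    while (node >= 0) {
        // First arrival gets 0 and stops; second arrival gets 1 and continues,
        // so both children are guaranteed to have been processed already.
        if (atomicAdd(&counters[node], 1u) == 0) return;

        // ... per-node work (subtree triangle count, SAH accumulation) ...

        node = parent[node];
    }
}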
TRBVH.cu
#include "TRBVH.h" #include <cuda_runtime_api.h> #include <math_functions.h> #include <device_launch_parameters.h> #include "Commons.cuh" #include "TimeKernelExecution.h" #include "Treelet.cuh" #include <cfloat> #define WARP_SIZE 32 #define GLOBAL_WARP_INDEX static_cast<int>((threadIdx.x + blockIdx.x * blockDim.x) / WARP_SIZE) #define WARP_INDEX static_cast<int>(threadIdx.x / WARP_SIZE) #define WARP_ARRAY(source, elementsPerWarp) ((source) + WARP_INDEX * (elementsPerWarp)) #define WARP_ARRAY_INDEX(index, elementsPerWarp) (WARP_INDEX * (elementsPerWarp) + (index)) #define THREAD_WARP_INDEX (threadIdx.x & (WARP_SIZE - 1)) namespace BVHRT { __device__ void reduceOptimal(float& optimalCost, int& optimalMask, int numberOfValues) { for (int i = numberOfValues >> 1; i > 0; i = (i >> 1)) { float otherValue = __shfl_down(optimalCost, i); int otherMask = __shfl_down(optimalMask, i); if (otherValue < optimalCost) { optimalCost = otherValue; optimalMask = otherMask; } } } __device__ void calculateSubsetSurfaceAreas(int treeletSize, BVHTree* tree, int* treeletLeaves, float* subsetAreas, float4* boundingBoxesMin, float4* boundingBoxesMax, float* costs) { float bbMin[3], bbMax[3]; if (THREAD_WARP_INDEX < treeletSize) { floatArrayFromFloat4(tree->BoundingBoxMin(treeletLeaves[THREAD_WARP_INDEX]), bbMin); floatArrayFromFloat4(tree->BoundingBoxMax(treeletLeaves[THREAD_WARP_INDEX]), bbMax); } // The 5 most significative bits are common ammong the thread's subsets int subset = THREAD_WARP_INDEX * 4; float3 baseMin, baseMax; baseMin.x = FLT_MAX; baseMin.y = FLT_MAX; baseMin.z = FLT_MAX; baseMax.x = -FLT_MAX; baseMax.y = -FLT_MAX; baseMax.z = -FLT_MAX; for (int i = (treeletSize - 5); i < treeletSize; ++i) { float3 leafBbMin, leafBbMax; SHFL_FLOAT3(leafBbMin, bbMin, i); SHFL_FLOAT3(leafBbMax, bbMax, i); if (subset & (1 << i)) { expandBoundingBox(baseMin, baseMax, leafBbMin, leafBbMax); } } int iterations = max(1, 1 << (treeletSize - 5)); // Num elements / 32, rounded up for (int j = 0; j < iterations; ++j) { float3 subsetMin, subsetMax; subsetMin.x = baseMin.x; subsetMin.y = baseMin.y; subsetMin.z = baseMin.z; subsetMax.x = baseMax.x; subsetMax.y = baseMax.y; subsetMax.z = baseMax.z; for (int i = 0; i < (treeletSize - 5); ++i) { float3 leafBbMin, leafBbMax; SHFL_FLOAT3(leafBbMin, bbMin, i); SHFL_FLOAT3(leafBbMax, bbMax, i); if (subset & (1 << i)) { expandBoundingBox(subsetMin, subsetMax, leafBbMin, leafBbMax); } } // Store bounding boxes and their surface areas int position = (1 << treeletSize) * GLOBAL_WARP_INDEX + subset; boundingBoxesMin[position] = float4FromFloat3(subsetMin); boundingBoxesMax[position] = float4FromFloat3(subsetMax); float subsetArea = calculateBoundingBoxSurfaceArea(subsetMin, subsetMax); subsetAreas[position] = subsetArea; costs[subset] = subsetArea; ++subset; } } __device__ void processSchedule(int numberOfRounds, int* schedule, float* costs, char* partitionMasks, int treeletTriangles, float ci, float ct) { for (int j = 0; j < numberOfRounds; ++j) { int subset = schedule[THREAD_WARP_INDEX + j * WARP_SIZE]; if (subset != 0) { // Process all possible partitions of the subset float optimalCost = FLT_MAX; int optimalPartition = 0; int delta = (subset - 1) & subset; int partition = (-delta) & subset; float partitionCost; while (partition != 0) { partitionCost = costs[partition] + costs[partition ^ subset]; if (partitionCost < optimalCost) { optimalCost = partitionCost; optimalPartition = partition; } partition = (partition - delta) & subset; } // Calculate subset SAH. 
Keep whichever has a lower SAH between collapsing // the subset treelet or leaving it as is costs[subset] = min(ci * costs[subset] + optimalCost, ct * costs[subset] * treeletTriangles); partitionMasks[subset] = static_cast<char>(optimalPartition); } WARP_SYNC; } } __device__ void processSubsets(int treeletSize, int treeletTriangles, float* costs, char* partitionMasks, float ci, float ct) { // Process subsets of size treeletSize-1. Each 4 threads will process a subset. There // are treeletSize subsets. if (THREAD_WARP_INDEX < 4 * treeletSize) { // To find the nth subset of treeletSize-1 elements, start with a sequence of // treeletSize ones and set the nth bit to 0 int subset = ((1 << treeletSize) - 1) & (~(1 << (THREAD_WARP_INDEX / 4))); // To assemble the partitions of nth subset of size treeletSize-1, we create a // mask to split that subset before the (n-1)th least significant bit. We then // get the left part of the masked base number and shift left by one (thus adding // the partition's 0). The last step is to OR the result with the right part of // the masked number and shift one more time to set the least significant bit to // 0. Below is an example for a treelet size of 7: // subset = 1110111 (7 bits) // base = abcde (5 bits) // partition = abc0de0 (7 bits) // The cast to int is required so max does not return the wrong value int leftMask = -(1 << max(static_cast<int>((THREAD_WARP_INDEX / 4) - 1), 0)); // x & 3 == x % 4 int partitionBase = (THREAD_WARP_INDEX & 3) + 1; float optimalCost = FLT_MAX; int optimalPartition = 0; int numberOfPartitions = (1 << (treeletSize - 2)) - 1; int partition = (((partitionBase & leftMask) << 1) | (partitionBase & ~leftMask)) << 1; for (int j = (THREAD_WARP_INDEX & 3); j < numberOfPartitions; j += 4) { float partitionCost = costs[partition] + costs[partition ^ subset]; if (partitionCost < optimalCost) { optimalCost = partitionCost; optimalPartition = partition; } partitionBase += 4; partition = (((partitionBase & leftMask) << 1) | (partitionBase & ~leftMask)) << 1; } reduceOptimal(optimalCost, optimalPartition, 4); if ((THREAD_WARP_INDEX & 3) == 0) { // Calculate subset SAH. Keep whichever has a lower SAH between collapsing // the subset treelet or leaving it as is costs[subset] = min(ci * costs[subset] + optimalCost, ct * costs[subset] * treeletTriangles); partitionMasks[subset] = static_cast<char>(optimalPartition); } } WARP_SYNC; // Process subsets of size treeletSize float optimalCost = FLT_MAX; int optimalPartition = 0; int subset = (1 << treeletSize) - 1; int partition = (THREAD_WARP_INDEX + 1) * 2; int numberOfPartitions = (1 << (treeletSize - 1)) - 1; for (int j = THREAD_WARP_INDEX; j < numberOfPartitions; j += 32) { float partitionCost = costs[partition] + costs[partition ^ subset]; if (partitionCost < optimalCost) { optimalCost = partitionCost; optimalPartition = partition; } partition += 64; } reduceOptimal(optimalCost, optimalPartition, WARP_SIZE); if (THREAD_WARP_INDEX == 0) { // Calculate subset SAH. 
Keep whichever has a lower SAH between collapsing // the subset treelet or leaving it as is costs[subset] = min(ci * costs[subset] + optimalCost, ct * costs[subset] * treeletTriangles); partitionMasks[subset] = static_cast<char>(optimalPartition); } } __device__ void updateTreelet(int treeletSize, BVHTree* tree, int* treeletInternalNodes, int* treeletLeaves, float* subsetAreas, float* costs, char* partitionMasks, float* nodesSah, float4* boundingBoxesMin, float4* boundingBoxesMax, int* stackNode, char* stackMask, int* stackSize, int* currentInternalNode) { int globalWarpIndex = GLOBAL_WARP_INDEX; if (costs[(1 << treeletSize) - 1] < nodesSah[treeletInternalNodes[0]]) { if (THREAD_WARP_INDEX == 0) { stackNode[globalWarpIndex * (treeletSize - 1)] = treeletInternalNodes[0]; stackMask[globalWarpIndex * (treeletSize - 1)] = static_cast<char>((1 << treeletSize) - 1); stackSize[globalWarpIndex] = 1; currentInternalNode[globalWarpIndex] = 1; } while (stackSize[globalWarpIndex] > 0) { int lastStackSize = stackSize[globalWarpIndex]; if (THREAD_WARP_INDEX == 0) { stackSize[globalWarpIndex] = 0; } if (THREAD_WARP_INDEX < lastStackSize) { int nodeSubset = stackMask [globalWarpIndex * (treeletSize - 1) + THREAD_WARP_INDEX]; char partition = partitionMasks[nodeSubset]; char partitionComplement = partition ^ nodeSubset; int subsetRoot = stackNode[globalWarpIndex * (treeletSize - 1) + THREAD_WARP_INDEX]; int childIndex; if (__popc(partition) > 1) { // Update node pointers int currentNode = atomicAdd(currentInternalNode + globalWarpIndex, 1); childIndex = treeletInternalNodes[currentNode]; tree->SetLeftIndex(subsetRoot, childIndex); tree->SetParentIndex(childIndex, subsetRoot); int position = (1 << treeletSize) * globalWarpIndex + partition; float4 bbMin = boundingBoxesMin[position]; float4 bbMax = boundingBoxesMax[position]; float area = calculateBoundingBoxSurfaceArea(bbMin, bbMax); // Update node area and bounding box tree->SetBoundingBoxMin(childIndex, bbMin); tree->SetBoundingBoxMax(childIndex, bbMax); tree->SetArea(childIndex, area); nodesSah[childIndex] = costs[partition]; // Add child to stack int stackIndex = atomicAdd(stackSize + globalWarpIndex, 1); stackNode[globalWarpIndex * (treeletSize - 1) + stackIndex] = childIndex; stackMask[globalWarpIndex * (treeletSize - 1) + stackIndex] = partition; } else { childIndex = treeletLeaves[__ffs(partition) - 1]; tree->SetLeftIndex(subsetRoot, childIndex); tree->SetParentIndex(childIndex, subsetRoot); } if (__popc(partitionComplement) > 1) { // Update node pointers int currentNode = atomicAdd(currentInternalNode + globalWarpIndex, 1); int childIndex = treeletInternalNodes[currentNode]; tree->SetRightIndex(subsetRoot, childIndex); tree->SetParentIndex(childIndex, subsetRoot); int position = (1 << treeletSize) * globalWarpIndex + partitionComplement; float4 bbMin = boundingBoxesMin[position]; float4 bbMax = boundingBoxesMax[position]; float area = calculateBoundingBoxSurfaceArea(bbMin, bbMax); // Update node area and bounding box tree->SetBoundingBoxMin(childIndex, bbMin); tree->SetBoundingBoxMax(childIndex, bbMax); tree->SetArea(childIndex, area); nodesSah[childIndex] = costs[partition]; // Add child to stack int stackIndex = atomicAdd(stackSize + globalWarpIndex, 1); stackNode[globalWarpIndex * (treeletSize - 1) + stackIndex] = childIndex; stackMask[globalWarpIndex * (treeletSize - 1) + stackIndex] = partitionComplement; } else { int childIndex = treeletLeaves[__ffs(partitionComplement) - 1]; tree->SetRightIndex(subsetRoot, childIndex); 
tree->SetParentIndex(childIndex, subsetRoot); } } } } } __global__ void LAUNCH_BOUNDS(128, 12) treeletReestructureKernel(unsigned int numberOfTriangles, BVHTree* tree, float* nodesSah, int treeletSize, int* subtreeTriangles, unsigned int* counters, int gamma, int* schedule, int numberOfRounds, float4* boundingBoxesMin, float4* boundingBoxesMax, float* subsetAreas, int* stackNode, char* stackMask, int* stackSize, int* currentInternalNode, float ci, float ct) { // Split the pre-allocated shared memory into distinct arrays for our treelet extern __shared__ int sharedMemory[]; __shared__ int* treeletInternalNodes; __shared__ int* treeletLeaves; __shared__ float* treeletLeavesAreas; __shared__ float* costs; __shared__ char* partitionMasks; // Having only the first thread perform this assignments and then // synchronizing is actually slower than issuing the assignments on all threads int numberOfWarps = blockDim.x / WARP_SIZE; if (THREAD_WARP_INDEX == 0) { treeletInternalNodes = sharedMemory; treeletLeaves = treeletInternalNodes + (treeletSize - 1) * numberOfWarps; treeletLeavesAreas = (float*)(treeletLeaves + treeletSize * numberOfWarps); costs = treeletLeavesAreas + treeletSize * numberOfWarps; partitionMasks = (char*)(costs + (1 << treeletSize) * numberOfWarps); } __syncthreads(); // If this flag is set, the thread will be excluded from the bottom up traversal, but will // still be available to help form and optimiza treelets const int threadIndex = threadIdx.x + blockIdx.x * blockDim.x; // Initialize leaves int currentNodeIndex; if (threadIndex < numberOfTriangles) { int leafIndex = threadIndex + numberOfTriangles - 1; float area = tree->Area(leafIndex); currentNodeIndex = tree->ParentIndex(leafIndex); subtreeTriangles[leafIndex] = 1; nodesSah[leafIndex] = ct * area; } else { currentNodeIndex = -1; } while (__ballot(currentNodeIndex >= 0) != 0) { // Number of threads who already have processed the current node unsigned int counter = 0; if (currentNodeIndex >= 0) { counter = atomicAdd(&counters[currentNodeIndex], 1); // Only the last thread to arrive is allowed to process the current node. This ensures // that both its children will already have been processed if (counter == 0) { currentNodeIndex = -1; } } // How many triangles can be reached by the subtree with root at the current node int triangleCount = 0; if (counter != 0) { // Throughout the code, blocks that have loads separated from stores are so organized // in order to increase ILP (Instruction level parallelism) int left = tree->LeftIndex(currentNodeIndex); int right = tree->RightIndex(currentNodeIndex); float area = tree->Area(currentNodeIndex); int trianglesLeft = subtreeTriangles[left]; float sahLeft = nodesSah[left]; int trianglesRight = subtreeTriangles[right]; float sahRight = nodesSah[right]; triangleCount = trianglesLeft + trianglesRight; subtreeTriangles[currentNodeIndex] = triangleCount; nodesSah[currentNodeIndex] = ci * area + sahLeft + sahRight; } // Check which threads in the warp have treelets to be processed. 
We are only going to // process a treelet if the current node is the root of a subtree with at least gamma // triangles unsigned int vote = __ballot(triangleCount >= gamma); while (vote != 0) { // Get the thread index for the treelet that will be processed int rootThreadIndex = __ffs(vote) - 1; // Get the treelet root by reading the corresponding thread's currentNodeIndex private // variable int treeletRootIndex = __shfl(currentNodeIndex, rootThreadIndex); formTreelet(treeletRootIndex, numberOfTriangles, tree, treeletSize, WARP_ARRAY(treeletInternalNodes, treeletSize - 1), WARP_ARRAY(treeletLeaves, treeletSize), WARP_ARRAY(treeletLeavesAreas, treeletSize)); // Optimize treelet calculateSubsetSurfaceAreas(treeletSize, tree, WARP_ARRAY(treeletLeaves, treeletSize), subsetAreas, boundingBoxesMin, boundingBoxesMax, WARP_ARRAY(costs, (1 << treeletSize))); // Set leaves cost if (THREAD_WARP_INDEX < treeletSize) { int leafIndex = WARP_ARRAY_INDEX(1 << THREAD_WARP_INDEX, 1 << treeletSize); int treeletLeafIndex = WARP_ARRAY_INDEX(THREAD_WARP_INDEX, treeletSize); costs[leafIndex] = nodesSah[treeletLeaves[treeletLeafIndex]]; } WARP_SYNC; int treeletTriangles = subtreeTriangles[treeletInternalNodes[WARP_ARRAY_INDEX(0, treeletSize - 1)]]; // Process subsets of sizes 2 to treeletSize-2 using the schedule processSchedule(numberOfRounds, schedule, WARP_ARRAY(costs, (1 << treeletSize)), WARP_ARRAY(partitionMasks, (1 << treeletSize)), treeletTriangles, ci, ct); WARP_SYNC; // Procecss remaining subsets processSubsets(treeletSize, treeletTriangles, WARP_ARRAY(costs, (1 << treeletSize)), WARP_ARRAY(partitionMasks, (1 << treeletSize)), ci, ct); WARP_SYNC; updateTreelet(treeletSize, tree, WARP_ARRAY(treeletInternalNodes, treeletSize - 1), WARP_ARRAY(treeletLeaves, treeletSize), subsetAreas, WARP_ARRAY(costs, (1 << treeletSize)), WARP_ARRAY(partitionMasks, (1 << treeletSize)), nodesSah, boundingBoxesMin, boundingBoxesMax, stackNode, stackMask, stackSize, currentInternalNode); // Update vote so each treelet is only processed once (set the bit that represents the // treelet that will be processed back to 0) vote &= ~(1 << rootThreadIndex); } // Update current node pointer if (currentNodeIndex >= 0) { currentNodeIndex = tree->ParentIndex(currentNodeIndex); } } } float DeviceTreeletReestructureOptimizer(unsigned int numberOfTriangles, BVHTree* tree, unsigned int* counters, int* subtreeTrianglesCount, float* nodesSah, int treeletSize, int gamma, int* schedule, int numberOfRounds, float4* boundingBoxesMin, float4* boundingBoxesMax, float* subsetAreas, int* stackNode, char* stackMask, int* stackSize, int* currentInternalNode, float ci, float ct) { dim3 blockSize(128, 1, 1); dim3 gridSize((numberOfTriangles + (blockSize.x - 1)) / blockSize.x, 1, 1); size_t treeletMemorySize = static_cast<size_t>((2 * treeletSize - 1) * sizeof(int) + treeletSize * sizeof(float)); size_t costAndMaskSize = static_cast<size_t>( (1 << treeletSize) * sizeof(float) + (1 << treeletSize) * sizeof(char)); size_t sharedMemorySize = static_cast<size_t>( (treeletMemorySize + costAndMaskSize) * (blockSize.x / 32)); cudaFuncSetCacheConfig(treeletReestructureKernel, cudaFuncCachePreferShared); return TimeKernelExecution([&]() { treeletReestructureKernel<<<gridSize, blockSize, sharedMemorySize>>>(numberOfTriangles, tree, nodesSah, treeletSize, subtreeTrianglesCount, counters, gamma, schedule, numberOfRounds, boundingBoxesMin, boundingBoxesMax, subsetAreas, stackNode, stackMask, stackSize, currentInternalNode, ci, ct); }); } }
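Beyond the traversal itself, the TRBVH pair above is a compact illustration of what hipify changes in a kernel launch: runtime calls are renamed one-for-one (cudaFuncSetCacheConfig -> hipFuncSetCacheConfig, cudaFuncCachePreferShared -> hipFuncCachePreferShared), while the triple-chevron launch is rewritten into the hipLaunchKernelGGL macro, whose leading arguments carry the launch configuration and stream. The snippet below is a minimal, compilable CUDA reduction of that launch, using a hypothetical dummyKernel instead of treeletReestructureKernel; the hipify-generated form is shown in the comment.

#include <cuda_runtime.h>

__global__ void dummyKernel(int n)              // stand-in for treeletReestructureKernel
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) { /* per-item work would go here */ }
}

int main()
{
    dim3 blockSize(128, 1, 1);
    dim3 gridSize(4, 1, 1);
    size_t sharedMemorySize = 0;                // the real launch passes a computed value

    // CUDA form, as in TRBVH.cu:
    dummyKernel<<<gridSize, blockSize, sharedMemorySize>>>(512);

    // hipify-generated form, as in the .hip counterpart (default stream passed explicitly):
    //   hipLaunchKernelGGL((dummyKernel), dim3(gridSize), dim3(blockSize),
    //                      sharedMemorySize, 0, 512);

    cudaDeviceSynchronize();
    return 0;
}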
79859f513124518fafaca13291ae0174f07f72e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cuda_runtimecu.h> #include <sentinel.h> #include <tcl.h> #include <tclExInt.h> #ifdef DEBUGGER #include <tclExDbg.h> #endif __device__ void Tcl_InitExtensions(Tcl_Interp *interp); __device__ bool _quitFlag = false; __constant__ char _initCmd[] = "puts stdout \"Tiny Tcl 6.8.0\n\""; #ifdef TCL_MEM_DEBUG __device__ char _dumpFile[100]; __device__ int cmdCheckmem(ClientData clientData, Tcl_Interp *interp, int argc, const char *args[]) { if (argc != 2) { Tcl_AppendResult(interp, "wrong # args: should be \"", args[0], " fileName\"", (char *)NULL); return TCL_ERROR; } strcpy(_dumpFile, args[1]); _quitFlag = true; return TCL_OK; } #endif // SAMPLE COMMAND #if 1 __device__ int SampleCommand(ClientData clientData, Tcl_Interp *interp, int argc, const char *args[]) { if (argc != 2) { Tcl_AppendResult(interp, "wrong # args: should be \"", args[0], " Msg\"", (char *)NULL); return TCL_ERROR; } printf("%s\n", args[1]); return TCL_OK; } #endif struct primaryData_t { Tcl_Interp *interp; Tcl_CmdBuf buffer; int noninteractive; bool gotPartial; bool quitFlag; int retcode; } h_dataP; // MAIN-INIT #if __HIPCC__ __device__ struct primaryData_t d_dataP; void D_DATAP() { cudaErrorCheck(hipMemcpyToSymbol(d_dataP, &h_dataP, sizeof(h_dataP))); } void H_DATAP() { cudaErrorCheck(hipMemcpyFromSymbol(&h_dataP, d_dataP, sizeof(h_dataP))); } //#define _exit(v) _dataP.quitFlag = true; _dataP.retcode = 1; return #define _dataP d_dataP __global__ void g_MainInit(int argc, char *const argv[]) { #else #define _dataP h_dataP static void MainInit(int argc, char *const argv[]) { memset(&h_dataP, 0, sizeof(h_dataP)); #endif Tcl_Interp *interp = _dataP.interp = Tcl_CreateInterp(); #ifdef TCL_MEM_DEBUG Tcl_InitMemory(interp); #endif TclEx_InitDebug(interp); TclEx_InitGeneral(interp); #ifdef DEBUGGER TclEx_InitDebug(interp); #endif // Init any static extensions Tcl_InitExtensions(interp); #ifdef TCL_MEM_DEBUG Tcl_CreateCommand(interp, "checkmem", cmdCheckmem, (ClientData)0, (Tcl_CmdDeleteProc *)NULL); #endif // SAMPLE COMMAND #if 1 Tcl_CreateCommand(interp, "sample", SampleCommand, nullptr, nullptr); #endif _dataP.buffer = Tcl_CreateCmdBuf(); int result; if (argc > 1 && strcmp(argv[1], "-")) { char *filename = (char *)argv[1]; // Before we eval the file, create an args global containing the remaining arguments char *args = Tcl_Merge(argc - 2, (const char **)argv + 2); Tcl_SetVar(interp, (char *)"argv", args, TCLGLOBAL__ONLY); _freeFast(args); result = Tcl_EvalFile(interp, filename); if (result != TCL_OK) { // And make sure we print an informative error if something goes wrong Tcl_AddErrorInfo(interp, (char *)""); printf("%s\n", Tcl_GetVar(interp, (char *)"errorInfo", TCL_LEAVE_ERR_MSG)); exit(1); } exit(0); } else { // Are we in interactive mode or script from stdin mode? 
_dataP.noninteractive = argc > 1; #ifndef TCL_GENERIC_ONLY if (!_dataP.noninteractive) { result = Tcl_Eval(interp, _initCmd, 0, (char **)NULL); if (result != TCL_OK) { printf("%s\n", interp->result); exit(1); } } #endif _dataP.retcode = -1; return; } } static int MainInit(int argc, char *const argv[]) { memset(&h_dataP, 0, sizeof(h_dataP)); //cudaErrorCheck(hipSetDeviceFlags(hipDeviceMapHost | hipDeviceLmemResizeToMax)); cudaErrorCheck(hipSetDevice(gpuGetMaxGflopsDevice())); cudaErrorCheck(hipDeviceSetLimit(hipLimitStackSize, 1024 * 5)); sentinelServerInitialize(); // char **d_argv = cudaDeviceTransferStringArray(argc, argv); D_DATAP();hipLaunchKernelGGL(( g_MainInit), dim3(1),dim3(1), 0, 0, argc, d_argv); cudaErrorCheck(hipDeviceSynchronize()); H_DATAP(); hipFree(d_argv); return h_dataP.retcode; } // INTERACTIVE-PROMPT static void InteractiveExecute(char *line); void InteractivePrompt() { FILE *in = stdin; FILE *out = stdout; h_dataP.gotPartial = false; while (true) { clearerr(in); if (!h_dataP.gotPartial) { if (!h_dataP.noninteractive) fputs("% ", out); fflush(out); } char line[1000]; if (fgets(line, 1000, in) == NULL) { if (!h_dataP.gotPartial) exit(0); line[0] = 0; } InteractiveExecute(line); } } // INTERACTIVE-EXEC #if __HIPCC__ __global__ void g_InteractiveExecute(char *line); static void InteractiveExecute(char *line) { char *d_line; int size = (int)strlen(line) + 1; cudaErrorCheck(hipMalloc((void **)&d_line, size)); cudaErrorCheck(hipMemcpy(d_line, line, size, hipMemcpyHostToDevice)); D_DATAP();hipLaunchKernelGGL(( g_InteractiveExecute), dim3(1),dim3(1), 0, 0, d_line); cudaErrorCheck(hipDeviceSynchronize()); H_DATAP(); hipFree(d_line); } __global__ void g_InteractiveExecute(char *line) { #else static void InteractiveExecute(char *line) { #endif Tcl_Interp *interp = _dataP.interp; Tcl_CmdBuf buffer = _dataP.buffer; char *cmd = Tcl_AssembleCmd(buffer, line); if (cmd == NULL) { _dataP.gotPartial = true; return; } _dataP.gotPartial = false; #ifdef TCL_NO_HISTORY int result = Tcl_Eval(interp, cmd, 0, (char **)NULL); #else int result = Tcl_RecordAndEval(interp, cmd, 0); #endif if (result == TCL_OK) { if (*interp->result != 0 && !_dataP.noninteractive) printf("%s\n", interp->result); if (_quitFlag) { Tcl_DeleteInterp(interp); Tcl_DeleteCmdBuf(buffer); #ifdef TCL_MEM_DEBUG Tcl_DumpActiveMemory(_dumpFile); #endif exit(0); } } else { if (result == TCL_ERROR) printf("Error"); else printf("Error %d", result); if (*interp->result != 0) printf(": %s\n", interp->result); else printf("\n"); } } // MAIN-SHUTDOWN #if __HIPCC__ __global__ void g_MainShutdown(); static int MainShutdown() { D_DATAP();hipLaunchKernelGGL(( g_MainShutdown), dim3(1),dim3(1), 0, 0, ); cudaErrorCheck(hipDeviceSynchronize()); H_DATAP(); sentinelServerShutdown(); hipDeviceReset(); return h_dataP.retcode; } __global__ void g_MainShutdown() { #else static void MainShutdown() { #endif } int main(int argc, char *const argv[]) { MainInit(argc, argv); if (h_dataP.quitFlag) exit(h_dataP.retcode); if (h_dataP.retcode == -1) InteractivePrompt(); MainShutdown(); }
79859f513124518fafaca13291ae0174f07f72e1.cu
#include <cuda_runtimecu.h> #include <sentinel.h> #include <tcl.h> #include <tclExInt.h> #ifdef DEBUGGER #include <tclExDbg.h> #endif __device__ void Tcl_InitExtensions(Tcl_Interp *interp); __device__ bool _quitFlag = false; __constant__ char _initCmd[] = "puts stdout \"Tiny Tcl 6.8.0\n\""; #ifdef TCL_MEM_DEBUG __device__ char _dumpFile[100]; __device__ int cmdCheckmem(ClientData clientData, Tcl_Interp *interp, int argc, const char *args[]) { if (argc != 2) { Tcl_AppendResult(interp, "wrong # args: should be \"", args[0], " fileName\"", (char *)NULL); return TCL_ERROR; } strcpy(_dumpFile, args[1]); _quitFlag = true; return TCL_OK; } #endif // SAMPLE COMMAND #if 1 __device__ int SampleCommand(ClientData clientData, Tcl_Interp *interp, int argc, const char *args[]) { if (argc != 2) { Tcl_AppendResult(interp, "wrong # args: should be \"", args[0], " Msg\"", (char *)NULL); return TCL_ERROR; } printf("%s\n", args[1]); return TCL_OK; } #endif struct primaryData_t { Tcl_Interp *interp; Tcl_CmdBuf buffer; int noninteractive; bool gotPartial; bool quitFlag; int retcode; } h_dataP; // MAIN-INIT #if __CUDACC__ __device__ struct primaryData_t d_dataP; void D_DATAP() { cudaErrorCheck(cudaMemcpyToSymbol(d_dataP, &h_dataP, sizeof(h_dataP))); } void H_DATAP() { cudaErrorCheck(cudaMemcpyFromSymbol(&h_dataP, d_dataP, sizeof(h_dataP))); } //#define _exit(v) _dataP.quitFlag = true; _dataP.retcode = 1; return #define _dataP d_dataP __global__ void g_MainInit(int argc, char *const argv[]) { #else #define _dataP h_dataP static void MainInit(int argc, char *const argv[]) { memset(&h_dataP, 0, sizeof(h_dataP)); #endif Tcl_Interp *interp = _dataP.interp = Tcl_CreateInterp(); #ifdef TCL_MEM_DEBUG Tcl_InitMemory(interp); #endif TclEx_InitDebug(interp); TclEx_InitGeneral(interp); #ifdef DEBUGGER TclEx_InitDebug(interp); #endif // Init any static extensions Tcl_InitExtensions(interp); #ifdef TCL_MEM_DEBUG Tcl_CreateCommand(interp, "checkmem", cmdCheckmem, (ClientData)0, (Tcl_CmdDeleteProc *)NULL); #endif // SAMPLE COMMAND #if 1 Tcl_CreateCommand(interp, "sample", SampleCommand, nullptr, nullptr); #endif _dataP.buffer = Tcl_CreateCmdBuf(); int result; if (argc > 1 && strcmp(argv[1], "-")) { char *filename = (char *)argv[1]; // Before we eval the file, create an args global containing the remaining arguments char *args = Tcl_Merge(argc - 2, (const char **)argv + 2); Tcl_SetVar(interp, (char *)"argv", args, TCLGLOBAL__ONLY); _freeFast(args); result = Tcl_EvalFile(interp, filename); if (result != TCL_OK) { // And make sure we print an informative error if something goes wrong Tcl_AddErrorInfo(interp, (char *)""); printf("%s\n", Tcl_GetVar(interp, (char *)"errorInfo", TCL_LEAVE_ERR_MSG)); exit(1); } exit(0); } else { // Are we in interactive mode or script from stdin mode? 
_dataP.noninteractive = argc > 1; #ifndef TCL_GENERIC_ONLY if (!_dataP.noninteractive) { result = Tcl_Eval(interp, _initCmd, 0, (char **)NULL); if (result != TCL_OK) { printf("%s\n", interp->result); exit(1); } } #endif _dataP.retcode = -1; return; } } static int MainInit(int argc, char *const argv[]) { memset(&h_dataP, 0, sizeof(h_dataP)); //cudaErrorCheck(cudaSetDeviceFlags(cudaDeviceMapHost | cudaDeviceLmemResizeToMax)); cudaErrorCheck(cudaSetDevice(gpuGetMaxGflopsDevice())); cudaErrorCheck(cudaDeviceSetLimit(cudaLimitStackSize, 1024 * 5)); sentinelServerInitialize(); // char **d_argv = cudaDeviceTransferStringArray(argc, argv); D_DATAP(); g_MainInit<<<1,1>>>(argc, d_argv); cudaErrorCheck(cudaDeviceSynchronize()); H_DATAP(); cudaFree(d_argv); return h_dataP.retcode; } // INTERACTIVE-PROMPT static void InteractiveExecute(char *line); void InteractivePrompt() { FILE *in = stdin; FILE *out = stdout; h_dataP.gotPartial = false; while (true) { clearerr(in); if (!h_dataP.gotPartial) { if (!h_dataP.noninteractive) fputs("% ", out); fflush(out); } char line[1000]; if (fgets(line, 1000, in) == NULL) { if (!h_dataP.gotPartial) exit(0); line[0] = 0; } InteractiveExecute(line); } } // INTERACTIVE-EXEC #if __CUDACC__ __global__ void g_InteractiveExecute(char *line); static void InteractiveExecute(char *line) { char *d_line; int size = (int)strlen(line) + 1; cudaErrorCheck(cudaMalloc((void **)&d_line, size)); cudaErrorCheck(cudaMemcpy(d_line, line, size, cudaMemcpyHostToDevice)); D_DATAP(); g_InteractiveExecute<<<1,1>>>(d_line); cudaErrorCheck(cudaDeviceSynchronize()); H_DATAP(); cudaFree(d_line); } __global__ void g_InteractiveExecute(char *line) { #else static void InteractiveExecute(char *line) { #endif Tcl_Interp *interp = _dataP.interp; Tcl_CmdBuf buffer = _dataP.buffer; char *cmd = Tcl_AssembleCmd(buffer, line); if (cmd == NULL) { _dataP.gotPartial = true; return; } _dataP.gotPartial = false; #ifdef TCL_NO_HISTORY int result = Tcl_Eval(interp, cmd, 0, (char **)NULL); #else int result = Tcl_RecordAndEval(interp, cmd, 0); #endif if (result == TCL_OK) { if (*interp->result != 0 && !_dataP.noninteractive) printf("%s\n", interp->result); if (_quitFlag) { Tcl_DeleteInterp(interp); Tcl_DeleteCmdBuf(buffer); #ifdef TCL_MEM_DEBUG Tcl_DumpActiveMemory(_dumpFile); #endif exit(0); } } else { if (result == TCL_ERROR) printf("Error"); else printf("Error %d", result); if (*interp->result != 0) printf(": %s\n", interp->result); else printf("\n"); } } // MAIN-SHUTDOWN #if __CUDACC__ __global__ void g_MainShutdown(); static int MainShutdown() { D_DATAP(); g_MainShutdown<<<1,1>>>(); cudaErrorCheck(cudaDeviceSynchronize()); H_DATAP(); sentinelServerShutdown(); cudaDeviceReset(); return h_dataP.retcode; } __global__ void g_MainShutdown() { #else static void MainShutdown() { #endif } int main(int argc, char *const argv[]) { MainInit(argc, argv); if (h_dataP.quitFlag) exit(h_dataP.retcode); if (h_dataP.retcode == -1) InteractivePrompt(); MainShutdown(); }
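The Tcl/sentinel pair above shows two further conversions worth calling out: the compile guard flips from __CUDACC__ to __HIPCC__, and the host/device state mirroring in D_DATAP()/H_DATAP() maps cudaMemcpyToSymbol/cudaMemcpyFromSymbol onto their hip* counterparts with the same argument shape. The sketch below reproduces that push-run-pull idiom with a made-up State struct rather than primaryData_t, purely to show the sequence in isolation.

#include <cuda_runtime.h>

struct State { int retcode; bool quitFlag; };   // stand-in for primaryData_t

State h_state;                                  // host copy
__device__ State d_state;                       // device copy

void pushState() { cudaMemcpyToSymbol(d_state, &h_state, sizeof(h_state)); }
void pullState() { cudaMemcpyFromSymbol(&h_state, d_state, sizeof(h_state)); }

__global__ void kernelTouchingState() { d_state.retcode = -1; }

int main()
{
    h_state.retcode = 0;
    pushState();                                // host -> device before the launch
    kernelTouchingState<<<1, 1>>>();
    cudaDeviceSynchronize();
    pullState();                                // device -> host after the launch
    return h_state.retcode == -1 ? 0 : 1;
}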
419768b23bd9204b4766cf699c53680c34a3a8ad.hip
// !!! This is a file automatically generated by hipify!!! #include "imageFilteringGpu.cuh" #include <opencv2/core/cuda/common.hpp> #include <opencv2/cudev.hpp> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> texture<uchar, hipTextureType2D, hipReadModeElementType> srcTex(false, hipFilterModePoint, hipAddressModeClamp); __device__ uchar clipGpu(float val) { return (val < 0.0f) ? 0 : (val > 255.0f) ? 255 : (uchar)val; } __global__ void imageFilteringGpu ( const cv::cudev::PtrStepSz<uchar> src, cv::cudev::PtrStepSz<uchar> dst, const cv::cudev::PtrStepSz<float> kernel, const int border_size ) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if((y >= border_size) && y < (dst.rows-border_size)){ if((x >= border_size) && (x < (dst.cols-border_size))){ float sum = 0.0f; for(int yy = 0; yy < kernel.rows; yy++){ for(int xx = 0; xx < kernel.cols; xx++){ sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], src.ptr(y+yy-border_size)[x+xx-border_size])); } } dst.ptr(y)[x] = clipGpu(sum); } } } // use __ldg __global__ void imageFilteringGpu_ldg ( const cv::cudev::PtrStepSz<uchar> src, cv::cudev::PtrStepSz<uchar> dst, const cv::cudev::PtrStepSz<float> kernel, const int border_size ) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if((y >= border_size) && y < (dst.rows-border_size)){ if((x >= border_size) && (x < (dst.cols-border_size))){ float sum = 0.0f; for(int yy = 0; yy < kernel.rows; yy++){ const uchar* psrc = src.ptr(y+yy-border_size) + (x-border_size); const float* pkernel = kernel.ptr(yy); for(int xx = 0; xx < kernel.cols; xx++){ sum = __fadd_rn(sum, __fmul_rn(__ldg(&pkernel[xx]), __ldg(&psrc[xx]))); } } dst.ptr(y)[x] = sum; } } } // use texture __global__ void imageFilteringGpu_tex ( const cv::cudev::PtrStepSz<uchar> src, cv::cudev::PtrStepSz<uchar> dst, const cv::cudev::PtrStepSz<float> kernel, const int border_size ) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if((y >= border_size) && (y < (dst.rows-border_size))){ if((x >= border_size) && (x < (dst.cols-border_size))){ float sum = 0.0f; for(int yy = 0; yy < kernel.rows; yy++){ for(int xx = 0; xx < kernel.cols; xx++){ sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], tex2D(srcTex, x + xx - border_size, y + yy - border_size))); } } dst.ptr(y)[x] = sum; } } } void launchImageFilteringGpu ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size ) { cv::cudev::PtrStepSz<uchar> pSrc = cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step); cv::cudev::PtrStepSz<uchar> pDst = cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step); cv::cudev::PtrStepSz<float> pKernel = cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step); const dim3 block(64, 2); const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); hipLaunchKernelGGL(( imageFilteringGpu), dim3(grid), dim3(block), 0, 0, pSrc, pDst, pKernel, border_size); CV_CUDEV_SAFE_CALL(hipGetLastError()); CV_CUDEV_SAFE_CALL(hipDeviceSynchronize()); } // use __ldg void launchImageFilteringGpu_ldg ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size ) { cv::cudev::PtrStepSz<uchar> pSrc = cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), 
src.step); cv::cudev::PtrStepSz<uchar> pDst = cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step); cv::cudev::PtrStepSz<float> pKernel = cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step); const dim3 block(64, 2); const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); hipLaunchKernelGGL(( imageFilteringGpu_ldg), dim3(grid), dim3(block), 0, 0, pSrc, pDst, pKernel, border_size); CV_CUDEV_SAFE_CALL(hipGetLastError()); CV_CUDEV_SAFE_CALL(hipDeviceSynchronize()); } // use texture void launchImageFilteringGpu_tex ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size ) { cv::cudev::PtrStepSz<uchar> pSrc = cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step); cv::cudev::PtrStepSz<uchar> pDst = cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step); cv::cudev::PtrStepSz<float> pKernel = cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step); // bind texture cv::cuda::device::bindTexture<uchar>(&srcTex, pSrc); const dim3 block(64, 2); const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); hipLaunchKernelGGL(( imageFilteringGpu_tex), dim3(grid), dim3(block), 0, 0, pSrc, pDst, pKernel, border_size); CV_CUDEV_SAFE_CALL(hipGetLastError()); CV_CUDEV_SAFE_CALL(hipDeviceSynchronize()); // unbind texture CV_CUDEV_SAFE_CALL(hipUnbindTexture(srcTex)); } double launchImageFilteringGpu ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size, const int loop_num ) { double f = 1000.0f / cv::getTickFrequency(); int64 start = 0, end = 0; double time = 0.0; for (int i = 0; i <= loop_num; i++){ start = cv::getTickCount(); launchImageFilteringGpu(src, dst, kernel, border_size); end = cv::getTickCount(); time += (i > 0) ? ((end - start) * f) : 0; } time /= loop_num; return time; } double launchImageFilteringGpu_ldg ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size, const int loop_num ) { double f = 1000.0f / cv::getTickFrequency(); int64 start = 0, end = 0; double time = 0.0; for (int i = 0; i <= loop_num; i++){ start = cv::getTickCount(); launchImageFilteringGpu_ldg(src, dst, kernel, border_size); end = cv::getTickCount(); time += (i > 0) ? ((end - start) * f) : 0; } time /= loop_num; return time; } double launchImageFilteringGpu_tex ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size, const int loop_num ) { double f = 1000.0f / cv::getTickFrequency(); int64 start = 0, end = 0; double time = 0.0; for (int i = 0; i <= loop_num; i++){ start = cv::getTickCount(); launchImageFilteringGpu_tex(src, dst, kernel, border_size); end = cv::getTickCount(); time += (i > 0) ? ((end - start) * f) : 0; } time /= loop_num; return time; }
419768b23bd9204b4766cf699c53680c34a3a8ad.cu
#include "imageFilteringGpu.cuh" #include <opencv2/core/cuda/common.hpp> #include <opencv2/cudev.hpp> #include <cuda_runtime.h> #include <device_launch_parameters.h> texture<uchar, cudaTextureType2D, cudaReadModeElementType> srcTex(false, cudaFilterModePoint, cudaAddressModeClamp); __device__ uchar clipGpu(float val) { return (val < 0.0f) ? 0 : (val > 255.0f) ? 255 : (uchar)val; } __global__ void imageFilteringGpu ( const cv::cudev::PtrStepSz<uchar> src, cv::cudev::PtrStepSz<uchar> dst, const cv::cudev::PtrStepSz<float> kernel, const int border_size ) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if((y >= border_size) && y < (dst.rows-border_size)){ if((x >= border_size) && (x < (dst.cols-border_size))){ float sum = 0.0f; for(int yy = 0; yy < kernel.rows; yy++){ for(int xx = 0; xx < kernel.cols; xx++){ sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], src.ptr(y+yy-border_size)[x+xx-border_size])); } } dst.ptr(y)[x] = clipGpu(sum); } } } // use __ldg __global__ void imageFilteringGpu_ldg ( const cv::cudev::PtrStepSz<uchar> src, cv::cudev::PtrStepSz<uchar> dst, const cv::cudev::PtrStepSz<float> kernel, const int border_size ) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if((y >= border_size) && y < (dst.rows-border_size)){ if((x >= border_size) && (x < (dst.cols-border_size))){ float sum = 0.0f; for(int yy = 0; yy < kernel.rows; yy++){ const uchar* psrc = src.ptr(y+yy-border_size) + (x-border_size); const float* pkernel = kernel.ptr(yy); for(int xx = 0; xx < kernel.cols; xx++){ sum = __fadd_rn(sum, __fmul_rn(__ldg(&pkernel[xx]), __ldg(&psrc[xx]))); } } dst.ptr(y)[x] = sum; } } } // use texture __global__ void imageFilteringGpu_tex ( const cv::cudev::PtrStepSz<uchar> src, cv::cudev::PtrStepSz<uchar> dst, const cv::cudev::PtrStepSz<float> kernel, const int border_size ) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if((y >= border_size) && (y < (dst.rows-border_size))){ if((x >= border_size) && (x < (dst.cols-border_size))){ float sum = 0.0f; for(int yy = 0; yy < kernel.rows; yy++){ for(int xx = 0; xx < kernel.cols; xx++){ sum = __fadd_rn(sum, __fmul_rn(kernel.ptr(yy)[xx], tex2D(srcTex, x + xx - border_size, y + yy - border_size))); } } dst.ptr(y)[x] = sum; } } } void launchImageFilteringGpu ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size ) { cv::cudev::PtrStepSz<uchar> pSrc = cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step); cv::cudev::PtrStepSz<uchar> pDst = cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step); cv::cudev::PtrStepSz<float> pKernel = cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step); const dim3 block(64, 2); const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); imageFilteringGpu<<<grid, block>>>(pSrc, pDst, pKernel, border_size); CV_CUDEV_SAFE_CALL(cudaGetLastError()); CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize()); } // use __ldg void launchImageFilteringGpu_ldg ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size ) { cv::cudev::PtrStepSz<uchar> pSrc = cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step); cv::cudev::PtrStepSz<uchar> pDst = cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * 
dst.channels(), dst.ptr<uchar>(), dst.step); cv::cudev::PtrStepSz<float> pKernel = cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step); const dim3 block(64, 2); const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); imageFilteringGpu_ldg<<<grid, block>>>(pSrc, pDst, pKernel, border_size); CV_CUDEV_SAFE_CALL(cudaGetLastError()); CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize()); } // use texture void launchImageFilteringGpu_tex ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size ) { cv::cudev::PtrStepSz<uchar> pSrc = cv::cudev::PtrStepSz<uchar>(src.rows, src.cols * src.channels(), src.ptr<uchar>(), src.step); cv::cudev::PtrStepSz<uchar> pDst = cv::cudev::PtrStepSz<uchar>(dst.rows, dst.cols * dst.channels(), dst.ptr<uchar>(), dst.step); cv::cudev::PtrStepSz<float> pKernel = cv::cudev::PtrStepSz<float>(kernel.rows, kernel.cols * kernel.channels(), kernel.ptr<float>(), kernel.step); // bind texture cv::cuda::device::bindTexture<uchar>(&srcTex, pSrc); const dim3 block(64, 2); const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); imageFilteringGpu_tex<<<grid, block>>>(pSrc, pDst, pKernel, border_size); CV_CUDEV_SAFE_CALL(cudaGetLastError()); CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize()); // unbind texture CV_CUDEV_SAFE_CALL(cudaUnbindTexture(srcTex)); } double launchImageFilteringGpu ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size, const int loop_num ) { double f = 1000.0f / cv::getTickFrequency(); int64 start = 0, end = 0; double time = 0.0; for (int i = 0; i <= loop_num; i++){ start = cv::getTickCount(); launchImageFilteringGpu(src, dst, kernel, border_size); end = cv::getTickCount(); time += (i > 0) ? ((end - start) * f) : 0; } time /= loop_num; return time; } double launchImageFilteringGpu_ldg ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size, const int loop_num ) { double f = 1000.0f / cv::getTickFrequency(); int64 start = 0, end = 0; double time = 0.0; for (int i = 0; i <= loop_num; i++){ start = cv::getTickCount(); launchImageFilteringGpu_ldg(src, dst, kernel, border_size); end = cv::getTickCount(); time += (i > 0) ? ((end - start) * f) : 0; } time /= loop_num; return time; } double launchImageFilteringGpu_tex ( cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& kernel, const int border_size, const int loop_num ) { double f = 1000.0f / cv::getTickFrequency(); int64 start = 0, end = 0; double time = 0.0; for (int i = 0; i <= loop_num; i++){ start = cv::getTickCount(); launchImageFilteringGpu_tex(src, dst, kernel, border_size); end = cv::getTickCount(); time += (i > 0) ? ((end - start) * f) : 0; } time /= loop_num; return time; }
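The filtering pair above also carries a small benchmarking convention: each launch*Gpu(..., loop_num) wrapper runs loop_num + 1 iterations and discards the first as a warm-up before averaging, using cv::getTickCount for timestamps. The sketch below restates that pattern with std::chrono instead of OpenCV's tick counter (an assumption made only to keep the example dependency-free), and with runFilterOnce as a hypothetical stand-in for a call such as launchImageFilteringGpu(src, dst, kernel, border_size).

#include <chrono>

// Average wall-clock time in milliseconds over loop_num timed runs,
// discarding the first (warm-up) iteration, as the wrappers above do.
double timeAveragedMs(void (*runFilterOnce)(), int loop_num)
{
    using clock = std::chrono::steady_clock;
    double totalMs = 0.0;
    for (int i = 0; i <= loop_num; ++i) {       // loop_num + 1 runs in total
        auto start = clock::now();
        runFilterOnce();
        auto end = clock::now();
        if (i > 0)                              // iteration 0 is the warm-up
            totalMs += std::chrono::duration<double, std::milli>(end - start).count();
    }
    return totalMs / loop_num;
}

Because both launchers synchronize the device inside the call (cudaDeviceSynchronize / hipDeviceSynchronize), host-side timestamps around the call measure the kernel work itself rather than just the launch overhead, which is the design choice the original wrappers rely on.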
e83f687ab6e333269680011afc9734b60ca9162c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void GPU_kernel(int max_itemcount) { int current_index = threadIdx.x + (blockIdx.x * blockDim.x); if (current_index < max_itemcount) printf("%i\n", current_index); } int main(void) { GPU_kernel << < 1, 10 >> > (100); hipDeviceSynchronize(); printf("Finished execution!\n"); return 0; }
e83f687ab6e333269680011afc9734b60ca9162c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void GPU_kernel(int max_itemcount) { int current_index = threadIdx.x + (blockIdx.x * blockDim.x); if (current_index < max_itemcount) printf("%i\n", current_index); } int main(void) { GPU_kernel << < 1, 10 >> > (100); cudaDeviceSynchronize(); printf("Finished execution!\n"); return 0; }
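In this last small pair, hipify leaves the chevron launch GPU_kernel << < 1, 10 >> > (100) untouched (hipcc accepts the <<< >>> syntax) and only renames the headers and cudaDeviceSynchronize to hipDeviceSynchronize. Since the launch uses a fixed 1 x 10 grid while max_itemcount is 100, only indices 0-9 are printed; the kernel's bounds check is what makes any over- or under-provisioned launch safe. The sketch below shows the divUp-style grid sizing (the same idea as cv::cudev::divUp in the filtering pair) that would cover every item; the kernel body is copied from above under a different name.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void GPU_kernel_covering(int max_itemcount)
{
    int current_index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (current_index < max_itemcount)
        printf("%i\n", current_index);
}

int main(void)
{
    int max_itemcount = 100;
    int block = 128;
    int grid = (max_itemcount + block - 1) / block;   // divUp(max_itemcount, block)
    GPU_kernel_covering<<<grid, block>>>(max_itemcount);
    cudaDeviceSynchronize();
    printf("Finished execution!\n");
    return 0;
}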
69357203b4ad4f4fd315267800a8d9f0b894a1e3.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]> * SPDX-License-Identifier: BSD-3-Clause * SPDX-FileType: SOURCE * * This program is free software: you can redistribute it and/or modify it * under the terms of the license found in the LICENSE.txt file in the root * directory of this source tree. */ // ======= // Headers // ======= #include "./cu_lanczos_tridiagonalization.h" #include <rocblas.h> // hipblasHandle_t #include <cmath> // sqrt #include "./cu_orthogonalization.h" // cuOrthogonalization #include "../_cu_basic_algebra/cu_vector_operations.h" // cuVectorOperations #include "../_cuda_utilities/cuda_interface.h" // alloc, copy_to_device, del // ============================ // c lanczos tridiagonalization // ============================ /// \brief Tri-diagonalizes matrix \c A to \c T using the start vector \c /// v. \c is the Lanczos degree, which will be the size of square /// matrix \c T. /// /// \details The output of this function is not an explicit matrix \c T, /// rather are the two arrays \c alpha of length \c m and \c beta /// of length \c m+1. The array \c alpha[:] represents the diagonal /// elements and \c beta[1:] represents the off-diagonal elements /// of the tri-diagonal \c (m,m) symmetric and positive-definite /// matrix \c T. /// /// #### Lanczos tridiagonalization vs Golub-Kahn bidiagonalization /// /// * The Lanczos tri-diagonalization is twice faster (in runtime), /// as it has only one matrix-vector multiplication. Whereas the /// Golub-Kahn bi-diagonalization has two matrix-vector /// multiplications. /// * The Lanczos tri-diagonalization can only be applied to /// symmetric matrices. Whereas the Golub-Kahn bi-diagonalization /// can be applied to any matrix. /// /// #### Algorithm /// /// The algorithm and notations are obtained from [DEMMEL], p. 57, /// Algorithm 4.6 (see also [SAAD] p. 137, Algorithm 6.5). However /// there are four ways to implement the iteration. [PAIGE]_ has /// shown that the iteration that is implemented below is the most /// stable against loosing orthogonality of the eigenvectors. For /// details, see [CULLUM]_ p. 46, and p.48, particularly the /// algorithm denoted by A(2,7). The differences of these /// implementations are the order in which \f$ \alpha_j \f$ and \f$ /// \beta_j \f$ are defined and the order in which vectors are /// subtracted from \c r . /// /// #### References /// /// * [DEMMEL] Demmel, J., Templates for solution of Algebraic /// Eigenvalue Problems, p. 57. /// * [SAAD] Saad, Numerical Methods for Large Eigenvalue Problems, /// p. 137. /// * [PAIGE] Paige (1980) Accuracy and effectiveness of the /// Lanczos algorithm for the symmetric eigenproblem. /// * [CULLUM] Cullum; Willoughby. Lanczos Algorithms for Large /// Symmetric Eigenvalue Computations. 1. pp.46-48. /// /// \param[in] A /// A linear operator that represents a matrix of size \c (n,n) /// and can perform matrix-vector operation with \c dot() method. /// This matrix should be positive-definite. /// \param[in] v /// Start vector for the Lanczos tri-diagonalization. Column vector /// of size c n. It could be generated randomly. Often it is /// generated by the Rademacher distribution with entries c +1 and /// \c -1. /// \param[in] n /// Size of the square matrix \c A, which is also the size of the /// vector \c v. /// \param[in] m /// Lanczos degree, which is the number of Lanczos iterations. 
/// \param[in] lanczos_tol /// The tolerance of the residual error of the Lanczos iteration. /// \param[in] orthogonalize /// Indicates whether to orthogonalize the orthogonal eigenvectors /// during Lanczos recursive iterations. /// * If set to \c 0, no orthogonalization is performed. /// * If set to a negative integer, a newly computed eigenvector is /// orthogonalized against all the previous eigenvectors (full /// reorthogonalization). /// * If set to a positive integer, say \c q less than /// \c lanczos_degree, the newly computed eigenvector is /// orthogonalized against the last \c q previous eigenvectors /// (partial reorthogonalization). /// * If set to an integer larger than \c lanczos_degree, it is cut /// to \c lanczos_degree, which effectively orthogonalizes /// against all previous eigenvectors (full reorthogonalization). /// \param[out] alpha /// This is a 1D array of size \c m. The array \c alpha[:] /// constitute the diagonal elements of the tri-diagonal matrix \c /// T. This is the output and written in place. /// \param[out] beta /// This is a 1D array of size \c m. The array \c beta[:] /// constitute the off-diagonals of the tri-diagonal matrix \c T. /// This array is the output and written in place. /// \return Counter for the Lanczos iterations. Normally, the size of the /// output matrix should be \c (m,m), which is the Lanczos degree. /// However, if the algorithm terminates early, the size of \c /// alpha and \c beta, and hence the output tri-diagonal matrix, is /// smaller. This counter keeps track of the *non-zero* size of \c /// alpha and \c beta. template <typename DataType> IndexType cu_lanczos_tridiagonalization( cuLinearOperator<DataType>* A, const DataType* v, const LongIndexType n, const IndexType m, const DataType lanczos_tol, const FlagType orthogonalize, DataType* alpha, DataType* beta) { // Get cublas handle hipblasHandle_t cublas_handle = A->get_cublas_handle(); // buffer_size is number of last orthogonal vectors to keep in the buffer V IndexType buffer_size; if (orthogonalize == 0 || orthogonalize == 1) { // At least two vectors must be stored in buffer for Lanczos recursion buffer_size = 2; } else if ((orthogonalize < 0) || (orthogonalize > static_cast<FlagType>(m))) { // Using full reorthogonalization, keep all of the m vectors in buffer buffer_size = m; } else { // Orthogonalize with less than m vectors (0 < orthogonalize < m) buffer_size = orthogonalize; } // Allocate 2D array (as 1D array, and coalesced row-wise) to store // the last buffer_size of orthogonalized vectors of length n. New vectors // are stored by cycling through the buffer to replace with old ones. 
DataType* device_V = CudaInterface<DataType>::alloc(n * buffer_size); // Allocate vector r DataType* device_r = CudaInterface<DataType>::alloc(n); // Copy v into r CudaInterface<DataType>::copy_to_device(v, n, device_r); // Initial beta DataType initial_beta = cuVectorOperations<DataType>::euclidean_norm( cublas_handle, device_r, n); // Declare iterators IndexType j; IndexType lanczos_size = 0; IndexType num_ortho; // In the following, beta[j] means beta[j-1] in the Demmel text for (j=0; j < m; ++j) { // Update the size of Lanczos tridiagonal matrix ++lanczos_size; // Normalize r and copy to the j-th column of V if (j == 0) { cuVectorOperations<DataType>::copy_scaled_vector( cublas_handle, device_r, n, 1.0/initial_beta, &device_V[(j % buffer_size)*n]); } else { cuVectorOperations<DataType>::copy_scaled_vector( cublas_handle, device_r, n, 1.0/beta[j-1], &device_V[(j % buffer_size)*n]); } // Multiply A to the j-th column of V, write into r A->dot(&device_V[(j % buffer_size)*n], device_r); // alpha[j] is V[:, j] dot r alpha[j] = cuVectorOperations<DataType>::inner_product( cublas_handle, &device_V[(j % buffer_size)*n], device_r, n); // Subtract V[:,j] * alpha[j] from r cuVectorOperations<DataType>::subtract_scaled_vector( cublas_handle, &device_V[(j % buffer_size)*n], n, alpha[j], device_r); // Subtract V[:,j-1] * beta[j] from r if (j > 0) { cuVectorOperations<DataType>::subtract_scaled_vector( cublas_handle, &device_V[((j-1) % buffer_size)*n], n, beta[j-1], device_r); } // Gram-Schmidt process (full re-orthogonalization) if (orthogonalize != 0) { // Find how many column vectors are filled so far in the buffer V if (j < buffer_size) { num_ortho = j+1; } else { num_ortho = buffer_size; } // Gram-Schmidt process cuOrthogonalization<DataType>::gram_schmidt_process( cublas_handle, &device_V[0], n, buffer_size, j%buffer_size, num_ortho, device_r); } // beta is norm of r beta[j] = cuVectorOperations<DataType>::euclidean_norm( cublas_handle, device_r, n); // Exit criterion when the vector r is zero. If each component of a // zero vector has the tolerance epsilon, (which is called lanczos_tol // here), the tolerance of norm of r is epsilon times sqrt of n. if (beta[j] < lanczos_tol * sqrt(n)) { break; } } // Free dynamic memory CudaInterface<DataType>::del(device_V); CudaInterface<DataType>::del(device_r); return lanczos_size; } // =============================== // Explicit template instantiation // =============================== // lanczos tridiagonalization template IndexType cu_lanczos_tridiagonalization<float>( cuLinearOperator<float>* A, const float* v, const LongIndexType n, const IndexType m, const float lanczos_tol, const FlagType orthogonalize, float* alpha, float* beta); template IndexType cu_lanczos_tridiagonalization<double>( cuLinearOperator<double>* A, const double* v, const LongIndexType n, const IndexType m, const double lanczos_tol, const FlagType orthogonalize, double* alpha, double* beta);
69357203b4ad4f4fd315267800a8d9f0b894a1e3.cu
/* * SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]> * SPDX-License-Identifier: BSD-3-Clause * SPDX-FileType: SOURCE * * This program is free software: you can redistribute it and/or modify it * under the terms of the license found in the LICENSE.txt file in the root * directory of this source tree. */ // ======= // Headers // ======= #include "./cu_lanczos_tridiagonalization.h" #include <cublas_v2.h> // cublasHandle_t #include <cmath> // sqrt #include "./cu_orthogonalization.h" // cuOrthogonalization #include "../_cu_basic_algebra/cu_vector_operations.h" // cuVectorOperations #include "../_cuda_utilities/cuda_interface.h" // alloc, copy_to_device, del // ============================ // c lanczos tridiagonalization // ============================ /// \brief Tri-diagonalizes matrix \c A to \c T using the start vector \c /// v. \c is the Lanczos degree, which will be the size of square /// matrix \c T. /// /// \details The output of this function is not an explicit matrix \c T, /// rather are the two arrays \c alpha of length \c m and \c beta /// of length \c m+1. The array \c alpha[:] represents the diagonal /// elements and \c beta[1:] represents the off-diagonal elements /// of the tri-diagonal \c (m,m) symmetric and positive-definite /// matrix \c T. /// /// #### Lanczos tridiagonalization vs Golub-Kahn bidiagonalization /// /// * The Lanczos tri-diagonalization is twice faster (in runtime), /// as it has only one matrix-vector multiplication. Whereas the /// Golub-Kahn bi-diagonalization has two matrix-vector /// multiplications. /// * The Lanczos tri-diagonalization can only be applied to /// symmetric matrices. Whereas the Golub-Kahn bi-diagonalization /// can be applied to any matrix. /// /// #### Algorithm /// /// The algorithm and notations are obtained from [DEMMEL], p. 57, /// Algorithm 4.6 (see also [SAAD] p. 137, Algorithm 6.5). However /// there are four ways to implement the iteration. [PAIGE]_ has /// shown that the iteration that is implemented below is the most /// stable against loosing orthogonality of the eigenvectors. For /// details, see [CULLUM]_ p. 46, and p.48, particularly the /// algorithm denoted by A(2,7). The differences of these /// implementations are the order in which \f$ \alpha_j \f$ and \f$ /// \beta_j \f$ are defined and the order in which vectors are /// subtracted from \c r . /// /// #### References /// /// * [DEMMEL] Demmel, J., Templates for solution of Algebraic /// Eigenvalue Problems, p. 57. /// * [SAAD] Saad, Numerical Methods for Large Eigenvalue Problems, /// p. 137. /// * [PAIGE] Paige (1980) Accuracy and effectiveness of the /// Lanczos algorithm for the symmetric eigenproblem. /// * [CULLUM] Cullum; Willoughby. Lanczos Algorithms for Large /// Symmetric Eigenvalue Computations. 1. pp.46-48. /// /// \param[in] A /// A linear operator that represents a matrix of size \c (n,n) /// and can perform matrix-vector operation with \c dot() method. /// This matrix should be positive-definite. /// \param[in] v /// Start vector for the Lanczos tri-diagonalization. Column vector /// of size c n. It could be generated randomly. Often it is /// generated by the Rademacher distribution with entries c +1 and /// \c -1. /// \param[in] n /// Size of the square matrix \c A, which is also the size of the /// vector \c v. /// \param[in] m /// Lanczos degree, which is the number of Lanczos iterations. /// \param[in] lanczos_tol /// The tolerance of the residual error of the Lanczos iteration. 
/// \param[in] orthogonalize /// Indicates whether to orthogonalize the orthogonal eigenvectors /// during Lanczos recursive iterations. /// * If set to \c 0, no orthogonalization is performed. /// * If set to a negative integer, a newly computed eigenvector is /// orthogonalized against all the previous eigenvectors (full /// reorthogonalization). /// * If set to a positive integer, say \c q less than /// \c lanczos_degree, the newly computed eigenvector is /// orthogonalized against the last \c q previous eigenvectors /// (partial reorthogonalization). /// * If set to an integer larger than \c lanczos_degree, it is cut /// to \c lanczos_degree, which effectively orthogonalizes /// against all previous eigenvectors (full reorthogonalization). /// \param[out] alpha /// This is a 1D array of size \c m. The array \c alpha[:] /// constitute the diagonal elements of the tri-diagonal matrix \c /// T. This is the output and written in place. /// \param[out] beta /// This is a 1D array of size \c m. The array \c beta[:] /// constitute the off-diagonals of the tri-diagonal matrix \c T. /// This array is the output and written in place. /// \return Counter for the Lanczos iterations. Normally, the size of the /// output matrix should be \c (m,m), which is the Lanczos degree. /// However, if the algorithm terminates early, the size of \c /// alpha and \c beta, and hence the output tri-diagonal matrix, is /// smaller. This counter keeps track of the *non-zero* size of \c /// alpha and \c beta. template <typename DataType> IndexType cu_lanczos_tridiagonalization( cuLinearOperator<DataType>* A, const DataType* v, const LongIndexType n, const IndexType m, const DataType lanczos_tol, const FlagType orthogonalize, DataType* alpha, DataType* beta) { // Get cublas handle cublasHandle_t cublas_handle = A->get_cublas_handle(); // buffer_size is number of last orthogonal vectors to keep in the buffer V IndexType buffer_size; if (orthogonalize == 0 || orthogonalize == 1) { // At least two vectors must be stored in buffer for Lanczos recursion buffer_size = 2; } else if ((orthogonalize < 0) || (orthogonalize > static_cast<FlagType>(m))) { // Using full reorthogonalization, keep all of the m vectors in buffer buffer_size = m; } else { // Orthogonalize with less than m vectors (0 < orthogonalize < m) buffer_size = orthogonalize; } // Allocate 2D array (as 1D array, and coalesced row-wise) to store // the last buffer_size of orthogonalized vectors of length n. New vectors // are stored by cycling through the buffer to replace with old ones. 
DataType* device_V = CudaInterface<DataType>::alloc(n * buffer_size); // Allocate vector r DataType* device_r = CudaInterface<DataType>::alloc(n); // Copy v into r CudaInterface<DataType>::copy_to_device(v, n, device_r); // Initial beta DataType initial_beta = cuVectorOperations<DataType>::euclidean_norm( cublas_handle, device_r, n); // Declare iterators IndexType j; IndexType lanczos_size = 0; IndexType num_ortho; // In the following, beta[j] means beta[j-1] in the Demmel text for (j=0; j < m; ++j) { // Update the size of Lanczos tridiagonal matrix ++lanczos_size; // Normalize r and copy to the j-th column of V if (j == 0) { cuVectorOperations<DataType>::copy_scaled_vector( cublas_handle, device_r, n, 1.0/initial_beta, &device_V[(j % buffer_size)*n]); } else { cuVectorOperations<DataType>::copy_scaled_vector( cublas_handle, device_r, n, 1.0/beta[j-1], &device_V[(j % buffer_size)*n]); } // Multiply A to the j-th column of V, write into r A->dot(&device_V[(j % buffer_size)*n], device_r); // alpha[j] is V[:, j] dot r alpha[j] = cuVectorOperations<DataType>::inner_product( cublas_handle, &device_V[(j % buffer_size)*n], device_r, n); // Subtract V[:,j] * alpha[j] from r cuVectorOperations<DataType>::subtract_scaled_vector( cublas_handle, &device_V[(j % buffer_size)*n], n, alpha[j], device_r); // Subtract V[:,j-1] * beta[j] from r if (j > 0) { cuVectorOperations<DataType>::subtract_scaled_vector( cublas_handle, &device_V[((j-1) % buffer_size)*n], n, beta[j-1], device_r); } // Gram-Schmidt process (full re-orthogonalization) if (orthogonalize != 0) { // Find how many column vectors are filled so far in the buffer V if (j < buffer_size) { num_ortho = j+1; } else { num_ortho = buffer_size; } // Gram-Schmidt process cuOrthogonalization<DataType>::gram_schmidt_process( cublas_handle, &device_V[0], n, buffer_size, j%buffer_size, num_ortho, device_r); } // beta is norm of r beta[j] = cuVectorOperations<DataType>::euclidean_norm( cublas_handle, device_r, n); // Exit criterion when the vector r is zero. If each component of a // zero vector has the tolerance epsilon, (which is called lanczos_tol // here), the tolerance of norm of r is epsilon times sqrt of n. if (beta[j] < lanczos_tol * sqrt(n)) { break; } } // Free dynamic memory CudaInterface<DataType>::del(device_V); CudaInterface<DataType>::del(device_r); return lanczos_size; } // =============================== // Explicit template instantiation // =============================== // lanczos tridiagonalization template IndexType cu_lanczos_tridiagonalization<float>( cuLinearOperator<float>* A, const float* v, const LongIndexType n, const IndexType m, const float lanczos_tol, const FlagType orthogonalize, float* alpha, float* beta); template IndexType cu_lanczos_tridiagonalization<double>( cuLinearOperator<double>* A, const double* v, const LongIndexType n, const IndexType m, const double lanczos_tol, const FlagType orthogonalize, double* alpha, double* beta);
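// A minimal host-side sketch, not part of the original file pair: the doc comment
// above describes alpha[:] as the diagonal and beta as the off-diagonal of the
// tridiagonal matrix T (the brief mentions "beta[1:]" of length m+1, while the
// parameter block and the code use beta[:] of length m; this sketch follows the
// latter), and the return value lanczos_size as the possibly smaller effective
// size on early termination. Assembling the dense, row-major T from those outputs:
#include <vector>
#include <cstddef>

std::vector<double> assemble_tridiagonal(const double* alpha,
                                         const double* beta,
                                         std::size_t lanczos_size)
{
    std::vector<double> T(lanczos_size * lanczos_size, 0.0);
    for (std::size_t i = 0; i < lanczos_size; ++i)
    {
        T[i * lanczos_size + i] = alpha[i];           // diagonal
        if (i + 1 < lanczos_size)
        {
            T[i * lanczos_size + (i + 1)] = beta[i];  // super-diagonal
            T[(i + 1) * lanczos_size + i] = beta[i];  // sub-diagonal (T is symmetric)
        }
    }
    return T;
}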
c11b20b731729ebaaedd4ce2e7058ae23822e598.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void pnpoly_cnGPU(char *cs, const float *px, const float *py, const float *vx, const float *vy, int npoint, int nvert) { extern __shared__ int s[]; float *tvx = (float*) s; float *tvy = (float*)&s[nvert]; int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < npoint) { int j, k, c = 0; for (j = 0, k = nvert-1; j < nvert; k = j++) { tvx[j] = vx [j]; tvy[j] = vy [j]; } __syncthreads(); for (j = 0, k = nvert-1; j < nvert; k = j++) { if ( ((tvy[j]>py[i]) != (tvy[k]>py[i])) && (px[i] < (tvx[k]-tvx[j]) * (py[i]-tvy[j]) / (tvy[k]-tvy[j]) + tvx[j]) ) c = !c; } cs[i] = c & 1; } }
c11b20b731729ebaaedd4ce2e7058ae23822e598.cu
#include "includes.h" __global__ void pnpoly_cnGPU(char *cs, const float *px, const float *py, const float *vx, const float *vy, int npoint, int nvert) { extern __shared__ int s[]; float *tvx = (float*) s; float *tvy = (float*)&s[nvert]; int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < npoint) { int j, k, c = 0; for (j = 0, k = nvert-1; j < nvert; k = j++) { tvx[j] = vx [j]; tvy[j] = vy [j]; } __syncthreads(); for (j = 0, k = nvert-1; j < nvert; k = j++) { if ( ((tvy[j]>py[i]) != (tvy[k]>py[i])) && (px[i] < (tvx[k]-tvx[j]) * (py[i]-tvy[j]) / (tvy[k]-tvy[j]) + tvx[j]) ) c = !c; } cs[i] = c & 1; } }
e271daa33ef23b48de7d5f0d556434130f3da4eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "noise_manager.h" #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "cuda_math_util.h" #include "cuda_util.h" #include <hipcub/hipcub.hpp> #include "io_iterator.h" namespace RPU { template <typename T> __global__ void kernelAbsMaxNPSum( float *scale_values, const int m_batch, const float *amax_values, const T *psum_values, const T *nsum_values, const T out_bound, const T assumed_wmax, const T bm_max // io.max_bm_res/io.inp_res ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < m_batch) { // w_max*MAX(psum,nsum)/out_bound < scale T w_max = assumed_wmax; T amax = amax_values[tid]; T psum = psum_values[tid]; T nsum = -nsum_values[tid]; T sum = MAX(psum, nsum); scale_values[tid] = MAX(amax, MIN(sum * w_max / out_bound, amax * bm_max)); ; } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- only use this version if m_batch < blockDim.x !!! // -- probably: strided version would be faster... int tid = blockDim.x * blockIdx.x + threadIdx.x; // assumes that shared is of size 2*nthreads*sizeof(T) !!!!!!!!!! 
extern __shared__ __align__(sizeof(double)) unsigned char rpu_smem_nm[]; T *block_sum_values = reinterpret_cast<T *>(rpu_smem_nm); T *block_psum_values = &block_sum_values[0]; T *block_nsum_values = &block_sum_values[blockDim.x]; const int size = total_size_in; const int m_batch = m_batch_in; block_psum_values[threadIdx.x] = (T)0.0; block_nsum_values[threadIdx.x] = (T)0.0; __syncthreads(); if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&(block_psum_values[midx]), value); } else { atomicAdd(&(block_nsum_values[midx]), value); } } __syncthreads(); int bidx = threadIdx.x; if (bidx < m_batch) { atomicAdd(&(psum_values[bidx]), block_psum_values[bidx]); atomicAdd(&(nsum_values[bidx]), block_nsum_values[bidx]); } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans_LargeBatch( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- use this version if m_batch >= blockDim.x // -- just uses atomic on global memory int tid = blockDim.x * blockIdx.x + threadIdx.x; const int size = total_size_in; const int m_batch = m_batch_in; if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&psum_values[midx], value); } else { atomicAdd(&nsum_values[midx], value); } } } template <typename T> __global__ void kernelAverageAbsMaxSetScales( float *scales, float *ravg, const float *sum, const int m_batch_in, T decay_rate_in) { int tid = blockDim.x * blockIdx.x + threadIdx.x; const int m_batch = m_batch_in; T decay_rate = decay_rate_in; T max_avg = (*sum) / m_batch; T run_avg = *ravg; if (tid < m_batch) { scales[tid] = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } if (tid == m_batch) { *ravg = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } } template <typename T> __global__ void kernelAverageAbsMaxSingleMomentum(float *ravg, const float *sum, const int m_batch, T decay_rate) { // just single block! 
int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { T max_avg = (*sum) / m_batch; *ravg = (float)(*ravg * (1.0 - decay_rate) + decay_rate * max_avg); } } /****************************************************************************************************************/ /* NoiseManager */ /******************************************************************************************************************/ #define LAUNCH_NPSUM_KERNEL(KNAME, SHARED_MEM, ARGS) \ hipLaunchKernelGGL(( KNAME<InputIteratorT, T>), dim3(nblocks), dim3(nthreads), SHARED_MEM, s, ARGS; template <typename T> NoiseManager<T>::NoiseManagerCudaContext *c, int size) : size_(size), context_(c), buffer_m_batch_(0), const_set_if_(false) { // initialize for m_batch=1 dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_ravg_scale_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_ravg_scale_value_->setConst(1.0); amaximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, true); maximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, false); size_t temp_storage_bytes = 0; hipcub::DeviceReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), size_, nsum_op_, 0, context_->getStream()); dev_v_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); context_->synchronize(); } template <typename T> void NoiseManager<T>::initializeBatchBuffer(int m_batch) { // this inits all the buffers needed for PMSum only !! if ((m_batch > 1) && (buffer_m_batch_ != m_batch)) { buffer_m_batch_ = m_batch; dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_->setConst((T)0.0); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_->setConst((T)0.0); int *offsets = new int[m_batch + 1]; // not trans for (int i = 0; i <= m_batch; i++) { offsets[i] = i * size_; } dev_offsets_ = RPU::make_unique<CudaArray<int>>(context_, m_batch + 1, offsets); size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, 0, context_->getStream()); dev_m_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); const_set_if_ = false; context_->synchronize(); delete[] offsets; } } template <typename T> template <typename InputIteratorT> void NoiseManager<T>::computeNPSum(InputIteratorT dev_input, int m_batch, bool trans) { hipStream_t s = context_->getStream(); if (m_batch == 1) { size_t ssz = dev_v_temp_storage_->getSize(); hipcub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), size_, psum_op_, (T)0, s); hipcub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), size_, nsum_op_, (T)0, s); } else { if (trans) { if (buffer_m_batch_ < m_batch) { this->initializeBatchBuffer(m_batch); } std::swap(dev_psum_values_, dev_psum_values0_); std::swap(dev_nsum_values_, dev_nsum_values0_); int nthreads = context_->getNThreads(); int n = size_ * m_batch; int nblocks = context_->getNBlocks(n, nthreads); if (m_batch <= nthreads) { int 
shared_mem = 2 * nthreads * sizeof(T); LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans, shared_mem, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } else { // simple atomic global memory version LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans_LargeBatch, 0, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } } else { if (buffer_m_batch_ != m_batch) { // !! need to reinitilize offsets when batch changes ! this->initializeBatchBuffer(m_batch); } // Fast Segmented reduction size_t ssz = dev_m_temp_storage_->getSize(); hipcub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, (T)0.0, s); hipcub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, nsum_op_, (T)0.0, s); } } } template <typename T> void NoiseManager<T>::setAverageAbsMax(float value) { dev_ravg_scale_value_->setConst(value); dev_scale_values_->setConst(value); ravg_initialized_ = true; context_->synchronize(); } template <typename T> float NoiseManager<T>::getAverageAbsMax() const { float tmp; dev_ravg_scale_value_->copyTo(&tmp); return tmp; }; template <typename T> template <typename InputIteratorT> void NoiseManager<T>::compute( InputIteratorT dev_input, const NoiseManagementType &nm_type, const IOMetaParameter<T> &io, int m_batch, bool trans, bool is_test) { // does not check for positive m_batch! nm_type_ = nm_type; switch (nm_type_) { case NoiseManagementType::None: { return; } case NoiseManagementType::Constant: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); const_set_if_ = false; } if (!const_set_if_) { dev_scale_values_->setConst(io.nm_thres > 0 ? (float)io.nm_thres : (float)1.0); const_set_if_ = true; } return; } case NoiseManagementType::Max: { this->maximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->maximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMax: { this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMaxNPSum: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); } // get amax and npsum this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } this->computeNPSum(dev_input, m_batch, trans); // combine int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); hipStream_t s = context_->getStream(); hipLaunchKernelGGL(( kernelAbsMaxNPSum<T>), dim3(nblocks), dim3(nthreads), 0, s, dev_scale_values_->getData(), m_batch, this->amaximizer_->getMaxValues(), dev_psum_values_->getDataConst(), dev_nsum_values_->getDataConst(), io.out_bound, io.nm_assumed_wmax, io.inp_res > 0 ? io.max_bm_res / io.inp_res : 1.0); return; } case NoiseManagementType::AverageAbsMax: case NoiseManagementType::AverageAbsMaxSingleValue: { // CAUTION: the running average will not be saved for checkpointing... so there might be a // glitch when continueing training from checkpoint... 
// ALSO: average max is computed across // mbatch whereas for CPU it is based running average of single mat-vecs if ((nm_type_ == NoiseManagementType::AverageAbsMax) && (m_batch > dev_scale_values_->getSize())) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); hipStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); // set scales to ravg [first time, could be set from outside] hipLaunchKernelGGL(( kernelAverageAbsMaxSetScales<T>) , dim3(context_->getNBlocks(m_batch + 1, nthreads)), dim3(nthreads), 0, s, dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), dev_ravg_scale_value_->getDataConst(), m_batch, (T)0.0); } if (!is_test) { this->amaximizer_->compute(dev_input, m_batch, trans); hipStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); if (m_batch > 1) { // first compute the average of the max over batch if (!dev_a_temp_storage_) { dev_avgmax_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); size_t temp_storage_bytes = 0; hipcub::DeviceReduce::Sum( nullptr, temp_storage_bytes, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); dev_a_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); } size_t ssz = dev_v_temp_storage_->getSize(); hipcub::DeviceReduce::Sum( (void *)dev_v_temp_storage_->getData(), ssz, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); } if (nm_type_ == NoiseManagementType::AverageAbsMax) { // now update the running scale and set the current scales constant for all m_batch hipLaunchKernelGGL(( kernelAverageAbsMaxSetScales<T>) , dim3(context_->getNBlocks(m_batch + 1, nthreads)), dim3(nthreads), 0, s, dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), dev_scale_values_->getSize(), ravg_initialized_ ? MIN(io.nm_decay * m_batch, 1.) : 1.0); } else { // just update the running avg value as only single output requested hipLaunchKernelGGL(( kernelAverageAbsMaxSingleMomentum<T>), dim3(1), dim3(1), 0, s, dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), m_batch, ravg_initialized_ ? 
MIN(io.nm_decay * m_batch, 1.0) : 1.0); } ravg_initialized_ = true; } return; } default: RPU_FATAL("Noise management type not implemented."); } } template <typename T> float *NoiseManager<T>::getScaleValues() const { switch (nm_type_) { case NoiseManagementType::None: return nullptr; case NoiseManagementType::AbsMaxNPSum: case NoiseManagementType::Constant: case NoiseManagementType::AverageAbsMax: return dev_scale_values_->getData(); case NoiseManagementType::AbsMax: return amaximizer_->getMaxValues(); case NoiseManagementType::Max: return maximizer_->getMaxValues(); case NoiseManagementType::AverageAbsMaxSingleValue: return dev_ravg_scale_value_->getData(); default: RPU_FATAL("Noise management type not implemented."); } }; #define ARGS1(NUM_T) , const NoiseManagementType &, const IOMetaParameter<NUM_T> &, int, bool, bool #define ARGS2 , int, bool template class NoiseManager<float>; RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::compute, ARGS1(float)); RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::computeNPSum, ARGS2); #ifdef RPU_USE_DOUBLE template class NoiseManager<double>; RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::compute, ARGS1(double)); RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::computeNPSum, ARGS2); #endif #undef ARGS1 #undef ARGS2 #undef LAUNCH_NPSUM_KERNEL } // namespace RPU
e271daa33ef23b48de7d5f0d556434130f3da4eb.cu
/** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "noise_manager.h" #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "cuda_math_util.h" #include "cuda_util.h" #include <cub/cub.cuh> #include "io_iterator.h" namespace RPU { template <typename T> __global__ void kernelAbsMaxNPSum( float *scale_values, const int m_batch, const float *amax_values, const T *psum_values, const T *nsum_values, const T out_bound, const T assumed_wmax, const T bm_max // io.max_bm_res/io.inp_res ) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < m_batch) { // w_max*MAX(psum,nsum)/out_bound < scale T w_max = assumed_wmax; T amax = amax_values[tid]; T psum = psum_values[tid]; T nsum = -nsum_values[tid]; T sum = MAX(psum, nsum); scale_values[tid] = MAX(amax, MIN(sum * w_max / out_bound, amax * bm_max)); ; } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- only use this version if m_batch < blockDim.x !!! // -- probably: strided version would be faster... int tid = blockDim.x * blockIdx.x + threadIdx.x; // assumes that shared is of size 2*nthreads*sizeof(T) !!!!!!!!!! extern __shared__ __align__(sizeof(double)) unsigned char rpu_smem_nm[]; T *block_sum_values = reinterpret_cast<T *>(rpu_smem_nm); T *block_psum_values = &block_sum_values[0]; T *block_nsum_values = &block_sum_values[blockDim.x]; const int size = total_size_in; const int m_batch = m_batch_in; block_psum_values[threadIdx.x] = (T)0.0; block_nsum_values[threadIdx.x] = (T)0.0; __syncthreads(); if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&(block_psum_values[midx]), value); } else { atomicAdd(&(block_nsum_values[midx]), value); } } __syncthreads(); int bidx = threadIdx.x; if (bidx < m_batch) { atomicAdd(&(psum_values[bidx]), block_psum_values[bidx]); atomicAdd(&(nsum_values[bidx]), block_nsum_values[bidx]); } } template <typename InputIteratorT, typename T> __global__ void kernelNPSumBatchTrans_LargeBatch( InputIteratorT input, const int total_size_in, const int m_batch_in, T *psum_values, T *nsum_values, T *psum_values0, T *nsum_values0) { // -- use this version if m_batch >= blockDim.x // -- just uses atomic on global memory int tid = blockDim.x * blockIdx.x + threadIdx.x; const int size = total_size_in; const int m_batch = m_batch_in; if (tid < m_batch) { psum_values0[tid] = (T)0.0; nsum_values0[tid] = (T)0.0; } if (tid < size) { T value = input[tid]; int midx = tid % m_batch; if (value >= 0) { atomicAdd(&psum_values[midx], value); } else { atomicAdd(&nsum_values[midx], value); } } } template <typename T> __global__ void kernelAverageAbsMaxSetScales( float *scales, float *ravg, const float *sum, const int m_batch_in, T decay_rate_in) { int tid = blockDim.x * blockIdx.x + threadIdx.x; const int m_batch = m_batch_in; T decay_rate = decay_rate_in; T max_avg = (*sum) 
/ m_batch; T run_avg = *ravg; if (tid < m_batch) { scales[tid] = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } if (tid == m_batch) { *ravg = (float)(run_avg * (1.0 - decay_rate) + decay_rate * max_avg); } } template <typename T> __global__ void kernelAverageAbsMaxSingleMomentum(float *ravg, const float *sum, const int m_batch, T decay_rate) { // just single block! int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { T max_avg = (*sum) / m_batch; *ravg = (float)(*ravg * (1.0 - decay_rate) + decay_rate * max_avg); } } /****************************************************************************************************************/ /* NoiseManager */ /******************************************************************************************************************/ #define LAUNCH_NPSUM_KERNEL(KNAME, SHARED_MEM, ARGS) \ KNAME<InputIteratorT, T><<<nblocks, nthreads, SHARED_MEM, s>>> ARGS; template <typename T> NoiseManager<T>::NoiseManager(CudaContext *c, int size) : size_(size), context_(c), buffer_m_batch_(0), const_set_if_(false) { // initialize for m_batch=1 dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, 1); dev_ravg_scale_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); dev_ravg_scale_value_->setConst(1.0); amaximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, true); maximizer_ = RPU::make_unique<Maximizer<T>>(context_, size, false); size_t temp_storage_bytes = 0; cub::DeviceReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), size_, nsum_op_, 0, context_->getStream()); dev_v_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); context_->synchronize(); } template <typename T> void NoiseManager<T>::initializeBatchBuffer(int m_batch) { // this inits all the buffers needed for PMSum only !! 
if ((m_batch > 1) && (buffer_m_batch_ != m_batch)) { buffer_m_batch_ = m_batch; dev_psum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_psum_values0_->setConst((T)0.0); dev_nsum_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_ = RPU::make_unique<CudaArray<T>>(context_, m_batch); dev_nsum_values0_->setConst((T)0.0); int *offsets = new int[m_batch + 1]; // not trans for (int i = 0; i <= m_batch; i++) { offsets[i] = i * size_; } dev_offsets_ = RPU::make_unique<CudaArray<int>>(context_, m_batch + 1, offsets); size_t temp_storage_bytes = 0; cub::DeviceSegmentedReduce::Reduce( nullptr, temp_storage_bytes, dev_psum_values_->getData(), dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, 0, context_->getStream()); dev_m_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); const_set_if_ = false; context_->synchronize(); delete[] offsets; } } template <typename T> template <typename InputIteratorT> void NoiseManager<T>::computeNPSum(InputIteratorT dev_input, int m_batch, bool trans) { cudaStream_t s = context_->getStream(); if (m_batch == 1) { size_t ssz = dev_v_temp_storage_->getSize(); cub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), size_, psum_op_, (T)0, s); cub::DeviceReduce::Reduce( (void *)dev_v_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), size_, nsum_op_, (T)0, s); } else { if (trans) { if (buffer_m_batch_ < m_batch) { this->initializeBatchBuffer(m_batch); } std::swap(dev_psum_values_, dev_psum_values0_); std::swap(dev_nsum_values_, dev_nsum_values0_); int nthreads = context_->getNThreads(); int n = size_ * m_batch; int nblocks = context_->getNBlocks(n, nthreads); if (m_batch <= nthreads) { int shared_mem = 2 * nthreads * sizeof(T); LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans, shared_mem, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } else { // simple atomic global memory version LAUNCH_NPSUM_KERNEL( kernelNPSumBatchTrans_LargeBatch, 0, (dev_input, n, m_batch, dev_psum_values_->getData(), dev_nsum_values_->getData(), dev_psum_values0_->getData(), dev_nsum_values0_->getData())); } } else { if (buffer_m_batch_ != m_batch) { // !! need to reinitilize offsets when batch changes ! 
this->initializeBatchBuffer(m_batch); } // Fast Segmented reduction size_t ssz = dev_m_temp_storage_->getSize(); cub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_psum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, psum_op_, (T)0.0, s); cub::DeviceSegmentedReduce::Reduce( (void *)dev_m_temp_storage_->getData(), ssz, dev_input, dev_nsum_values_->getData(), m_batch, dev_offsets_->getData(), dev_offsets_->getData() + 1, nsum_op_, (T)0.0, s); } } } template <typename T> void NoiseManager<T>::setAverageAbsMax(float value) { dev_ravg_scale_value_->setConst(value); dev_scale_values_->setConst(value); ravg_initialized_ = true; context_->synchronize(); } template <typename T> float NoiseManager<T>::getAverageAbsMax() const { float tmp; dev_ravg_scale_value_->copyTo(&tmp); return tmp; }; template <typename T> template <typename InputIteratorT> void NoiseManager<T>::compute( InputIteratorT dev_input, const NoiseManagementType &nm_type, const IOMetaParameter<T> &io, int m_batch, bool trans, bool is_test) { // does not check for positive m_batch! nm_type_ = nm_type; switch (nm_type_) { case NoiseManagementType::None: { return; } case NoiseManagementType::Constant: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); const_set_if_ = false; } if (!const_set_if_) { dev_scale_values_->setConst(io.nm_thres > 0 ? (float)io.nm_thres : (float)1.0); const_set_if_ = true; } return; } case NoiseManagementType::Max: { this->maximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->maximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMax: { this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } return; } case NoiseManagementType::AbsMaxNPSum: { if (m_batch > dev_scale_values_->getSize()) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); } // get amax and npsum this->amaximizer_->compute(dev_input, m_batch, trans); if (io.nm_thres > 0) { this->amaximizer_->saturateAbove(io.nm_thres); } this->computeNPSum(dev_input, m_batch, trans); // combine int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); cudaStream_t s = context_->getStream(); kernelAbsMaxNPSum<T><<<nblocks, nthreads, 0, s>>>( dev_scale_values_->getData(), m_batch, this->amaximizer_->getMaxValues(), dev_psum_values_->getDataConst(), dev_nsum_values_->getDataConst(), io.out_bound, io.nm_assumed_wmax, io.inp_res > 0 ? io.max_bm_res / io.inp_res : 1.0); return; } case NoiseManagementType::AverageAbsMax: case NoiseManagementType::AverageAbsMaxSingleValue: { // CAUTION: the running average will not be saved for checkpointing... so there might be a // glitch when continueing training from checkpoint... 
// ALSO: average max is computed across // mbatch whereas for CPU it is based running average of single mat-vecs if ((nm_type_ == NoiseManagementType::AverageAbsMax) && (m_batch > dev_scale_values_->getSize())) { dev_scale_values_ = RPU::make_unique<CudaArray<float>>(context_, m_batch); cudaStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); // set scales to ravg [first time, could be set from outside] kernelAverageAbsMaxSetScales<T> <<<context_->getNBlocks(m_batch + 1, nthreads), nthreads, 0, s>>>( dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), dev_ravg_scale_value_->getDataConst(), m_batch, (T)0.0); } if (!is_test) { this->amaximizer_->compute(dev_input, m_batch, trans); cudaStream_t s = context_->getStream(); int nthreads = context_->getNThreads(); int nblocks = context_->getNBlocks(m_batch, nthreads); if (m_batch > 1) { // first compute the average of the max over batch if (!dev_a_temp_storage_) { dev_avgmax_value_ = RPU::make_unique<CudaArray<float>>(context_, 1); size_t temp_storage_bytes = 0; cub::DeviceReduce::Sum( nullptr, temp_storage_bytes, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); dev_a_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, temp_storage_bytes); } size_t ssz = dev_v_temp_storage_->getSize(); cub::DeviceReduce::Sum( (void *)dev_v_temp_storage_->getData(), ssz, amaximizer_->getMaxValues(), dev_avgmax_value_->getData(), m_batch, s); } if (nm_type_ == NoiseManagementType::AverageAbsMax) { // now update the running scale and set the current scales constant for all m_batch kernelAverageAbsMaxSetScales<T> <<<context_->getNBlocks(m_batch + 1, nthreads), nthreads, 0, s>>>( dev_scale_values_->getData(), dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), dev_scale_values_->getSize(), ravg_initialized_ ? MIN(io.nm_decay * m_batch, 1.) : 1.0); } else { // just update the running avg value as only single output requested kernelAverageAbsMaxSingleMomentum<T><<<1, 1, 0, s>>>( dev_ravg_scale_value_->getData(), m_batch > 1 ? dev_avgmax_value_->getData() : amaximizer_->getMaxValues(), m_batch, ravg_initialized_ ? MIN(io.nm_decay * m_batch, 1.0) : 1.0); } ravg_initialized_ = true; } return; } default: RPU_FATAL("Noise management type not implemented."); } } template <typename T> float *NoiseManager<T>::getScaleValues() const { switch (nm_type_) { case NoiseManagementType::None: return nullptr; case NoiseManagementType::AbsMaxNPSum: case NoiseManagementType::Constant: case NoiseManagementType::AverageAbsMax: return dev_scale_values_->getData(); case NoiseManagementType::AbsMax: return amaximizer_->getMaxValues(); case NoiseManagementType::Max: return maximizer_->getMaxValues(); case NoiseManagementType::AverageAbsMaxSingleValue: return dev_ravg_scale_value_->getData(); default: RPU_FATAL("Noise management type not implemented."); } }; #define ARGS1(NUM_T) , const NoiseManagementType &, const IOMetaParameter<NUM_T> &, int, bool, bool #define ARGS2 , int, bool template class NoiseManager<float>; RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::compute, ARGS1(float)); RPU_GEN_IITER_TEMPLATES(float, void, NoiseManager<float>::computeNPSum, ARGS2); #ifdef RPU_USE_DOUBLE template class NoiseManager<double>; RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::compute, ARGS1(double)); RPU_GEN_IITER_TEMPLATES(double, void, NoiseManager<double>::computeNPSum, ARGS2); #endif #undef ARGS1 #undef ARGS2 #undef LAUNCH_NPSUM_KERNEL } // namespace RPU
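// A hedged host-side reference, not part of the original file pair: for the
// AbsMaxNPSum noise-management mode, kernelAbsMaxNPSum above combines each batch
// column's absolute maximum with its positive and negative partial sums. The
// per-column formula, restated in plain C++ (parameter names mirror the kernel
// arguments and are illustrative, not a public API):
#include <algorithm>

float absmax_npsum_scale(float amax,          // abs-max of the column
                         float psum,          // sum of the positive entries
                         float nsum,          // sum of the negative entries (<= 0)
                         float out_bound,     // io.out_bound
                         float assumed_wmax,  // io.nm_assumed_wmax
                         float bm_max)        // io.max_bm_res / io.inp_res, or 1 if inp_res <= 0
{
    const float sum = std::max(psum, -nsum);  // larger of the two one-sided sums
    return std::max(amax, std::min(sum * assumed_wmax / out_bound, amax * bm_max));
}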
d4d3edd4ff75716eb1c47f606787bba116d28736.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" //#include "device_atomic_functions.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/custom_layers.hpp" namespace caffe { template <typename Dtype> __global__ void FixCoordinate(const int n, Dtype* in_out, Dtype min_value, Dtype max_value) { CUDA_KERNEL_LOOP(index, n) { in_out[index] = (in_out[index] < min_value && in_out[index] > min_value - 1e-4) ? min_value : in_out[index]; in_out[index] = (in_out[index] > max_value && in_out[index] < (max_value + 1e-4)) ? max_value : in_out[index]; } } template <typename Dtype> __global__ void TransformerForward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, Dtype* CoordinateSource_data, Dtype* transformed_data) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; Dtype x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (Dtype)height / 2; Dtype y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (Dtype)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { transformed_data[(((n * channels + c) * height + h) * width) + w] += data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (1 - abs(y - yy)); } } } } } } template <typename Dtype> __global__ void TransformerBackward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, const Dtype* CoordinateSource_data, const Dtype* top_diff, Dtype* data_diff, Dtype* CoordinateSource_diff); template <> __global__ void TransformerBackward<float>(const int num, const int channels, const int spatial_dim, const int height, const int width, const float* data, const float* CoordinateSource_data, const float* top_diff, float* data_diff, float* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; float x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (float)height / 2; float y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (float)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>float(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * 
height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; if ((yy - y) > float(0)) CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; } } } } } } __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> __global__ void TransformerBackward<double>(const int num, const int channels, const int spatial_dim, const int height, const int width, const double* data, const double* CoordinateSource_data, const double* top_diff, double* data_diff, double* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; double x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (double)height / 2; double y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (double)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { //atomicAdd do not support double float. Please avoid using Net<double>. 
atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>double(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; if ((yy - y) > double(0)) CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; } } } } } } template <typename Dtype> void TransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* theta_data = bottom[1]->gpu_data(); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); Dtype* CoordinateSource_data = CoordinateSource.mutable_gpu_data(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); caffe_gpu_set<Dtype>(top[0]->count(), 0, top_data);//why memset cause error? 
for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2, spatial_dim, 3, Dtype(1), theta_data + n * 6, CoordinateTarget_data, Dtype(0), CoordinateSource_data + n * 2 * spatial_dim); FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(2));//height = 10, then max = 9/5-1=0.8 FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim + spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(3)); } TransformerForward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_data); } template <typename Dtype> void TransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* data_diff = bottom[0]->mutable_gpu_diff(); Dtype* theta_diff = bottom[1]->mutable_gpu_diff(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); const Dtype* CoordinateSource_data = CoordinateSource.gpu_data(); Dtype* CoordinateSource_diff = CoordinateSource.mutable_gpu_diff(); caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff); caffe_gpu_set<Dtype>(CoordinateSource.count(), 0, CoordinateSource_diff); TransformerBackward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_diff, data_diff, CoordinateSource_diff); for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, spatial_dim, Dtype(1), CoordinateSource_diff + n * 2 * spatial_dim, CoordinateTarget_data, Dtype(0), theta_diff + n * 6); } } INSTANTIATE_LAYER_GPU_FUNCS(TransformerLayer); } // namespace caffe
d4d3edd4ff75716eb1c47f606787bba116d28736.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" //#include "device_atomic_functions.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/custom_layers.hpp" namespace caffe { template <typename Dtype> __global__ void FixCoordinate(const int n, Dtype* in_out, Dtype min_value, Dtype max_value) { CUDA_KERNEL_LOOP(index, n) { in_out[index] = (in_out[index] < min_value && in_out[index] > min_value - 1e-4) ? min_value : in_out[index]; in_out[index] = (in_out[index] > max_value && in_out[index] < (max_value + 1e-4)) ? max_value : in_out[index]; } } template <typename Dtype> __global__ void TransformerForward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, Dtype* CoordinateSource_data, Dtype* transformed_data) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; Dtype x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (Dtype)height / 2; Dtype y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (Dtype)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { transformed_data[(((n * channels + c) * height + h) * width) + w] += data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (1 - abs(y - yy)); } } } } } } template <typename Dtype> __global__ void TransformerBackward(const int num, const int channels, const int spatial_dim, const int height, const int width, const Dtype* data, const Dtype* CoordinateSource_data, const Dtype* top_diff, Dtype* data_diff, Dtype* CoordinateSource_diff); template <> __global__ void TransformerBackward<float>(const int num, const int channels, const int spatial_dim, const int height, const int width, const float* data, const float* CoordinateSource_data, const float* top_diff, float* data_diff, float* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; float x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (float)height / 2; float y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (float)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>float(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (float)height / 2; if ((yy - y) > float(0)) 
CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (float)width / 2; } } } } } } __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> __global__ void TransformerBackward<double>(const int num, const int channels, const int spatial_dim, const int height, const int width, const double* data, const double* CoordinateSource_data, const double* top_diff, double* data_diff, double* CoordinateSource_diff) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; int h = s / width; int w = s % width; double x = CoordinateSource_data[n * 2 * spatial_dim + h * width + w] * height / 2 + (double)height / 2; double y = CoordinateSource_data[n * 2 * spatial_dim + spatial_dim + h * width + w] * width / 2 + (double)width / 2; if (x >= 0 && x <= height - 1 && y >= 0 && y <= width - 1) { for (int c = 0; c < channels; c++) { for (int xx = floor(x); xx <= ceil(x); xx++) { for (int yy = floor(y); yy <= ceil(y); yy++) { //atomicAdd do not support double float. Please avoid using Net<double>. atomicAdd(&data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w] * (1 - abs(x - xx)) * (1 - abs(y - yy))); //printf("(%d,%d,%d,%d)(%f,%f)(%d,%d)(%f,%f)\n", n, c, h, w, x, y, xx, yy, data_diff[(((n * channels + c) * height + xx) * width) + yy], top_diff[(((n * channels + c) * height + h) * width) + w]); if ((xx - x)>double(0)) CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; else CoordinateSource_diff[n * 2 * spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(y - yy)) * (double)height / 2; if ((yy - y) > double(0)) CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] += top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; else CoordinateSource_diff[n * 2 * spatial_dim + spatial_dim + h * width + w] -= top_diff[(((n * channels + c) * height + h) * width) + w] * data[(((n * channels + c) * height + xx) * width) + yy] * (1 - abs(x - xx)) * (double)width / 2; } } } } } } template <typename Dtype> void TransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* theta_data = bottom[1]->gpu_data(); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); Dtype* CoordinateSource_data = CoordinateSource.mutable_gpu_data(); int num = 
bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); caffe_gpu_set<Dtype>(top[0]->count(), 0, top_data);//why memset cause error? for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 2, spatial_dim, 3, Dtype(1), theta_data + n * 6, CoordinateTarget_data, Dtype(0), CoordinateSource_data + n * 2 * spatial_dim); FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(2));//height = 10, then max = 9/5-1=0.8 FixCoordinate<Dtype> << <CAFFE_GET_BLOCKS(spatial_dim), CAFFE_CUDA_NUM_THREADS >> >( spatial_dim, CoordinateSource_data + n * 2 * spatial_dim + spatial_dim, -1, 1 - 2 / (Dtype)bottom[0]->shape(3)); } TransformerForward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_data); } template <typename Dtype> void TransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* data_diff = bottom[0]->mutable_gpu_diff(); Dtype* theta_diff = bottom[1]->mutable_gpu_diff(); int num = bottom[0]->shape(0); int channels = bottom[0]->shape(1); int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3); const Dtype* CoordinateTarget_data = CoordinateTarget.gpu_data(); const Dtype* CoordinateSource_data = CoordinateSource.gpu_data(); Dtype* CoordinateSource_diff = CoordinateSource.mutable_gpu_diff(); caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff); caffe_gpu_set<Dtype>(CoordinateSource.count(), 0, CoordinateSource_diff); TransformerBackward<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, bottom[0]->shape(2), bottom[0]->shape(3), bottom_data, CoordinateSource_data, top_diff, data_diff, CoordinateSource_diff); for (int n = 0; n < num; n++) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, spatial_dim, Dtype(1), CoordinateSource_diff + n * 2 * spatial_dim, CoordinateTarget_data, Dtype(0), theta_diff + n * 6); } } INSTANTIATE_LAYER_GPU_FUNCS(TransformerLayer); } // namespace caffe
615a91851080826537bd84fd2ff2bbfd9cd211ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void concatCuda(void* pVx, void* pxShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen, totalThreads; __shared__ int rank; if (threadIdx.x == 0) { zLen = shape::length(zShapeInfo); rank = shape::rank(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int coords[MAX_RANK]; for (uint64_t i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int inArrIdx = 0; Nd4jLong *xShapeInfo = reinterpret_cast<Nd4jLong **>(pxShapeInfo)[inArrIdx]; while (coords[axis] >= xShapeInfo[axis + 1]) { coords[axis] -= xShapeInfo[axis + 1]; xShapeInfo = reinterpret_cast<Nd4jLong **>(pxShapeInfo)[++inArrIdx]; } const auto *x = reinterpret_cast<T *>(reinterpret_cast<void **>(pVx)[inArrIdx]); const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void concatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* pVx, void* pxShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { hipLaunchKernelGGL(( concatCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, pVx, pxShapeInfo, vz, zShapeInfo, axis); } ////////////////////////////////////////////////////////////////////////// void concat(sd::LaunchContext * context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int axis) { const int numOfInArrs = inArrs.size(); const auto sizeofT = output.sizeOfT(); NDArray::prepareSpecialUse({&output}, inArrs); bool luckCase1 = ((axis == 0 && output.ordering() == 'c') || (axis == output.rankOf() - 1 && output.ordering() == 'f')) && output.ews() == 1; if(luckCase1) { for (uint i = 0; i < numOfInArrs; ++i) { luckCase1 &= inArrs[i]->ordering() == output.ordering() && inArrs[i]->ews() == 1; if(!luckCase1) break; } } if(luckCase1) { // for example {1,10} + {2,10} + {3,10} = {6, 10} order c; or {10,1} + {10,2} + {10,3} 
= {10, 6} order f void* z = static_cast<int8_t*>(output.specialBuffer()); for (uint i = 0; i < numOfInArrs; ++i) { const auto memAmountToCopy = inArrs[i]->lengthOf() * sizeofT; hipMemcpyAsync(z, reinterpret_cast<const int8_t*>(inArrs[i]->specialBuffer()), memAmountToCopy, hipMemcpyDeviceToDevice, *context->getCudaStream()); z = static_cast<int8_t*>(z) + memAmountToCopy; } if(hipStreamSynchronize(*context->getCudaStream()) != 0) throw std::runtime_error("concat cuda: luckCase1 failed!"); for(int i = 0; i < numOfInArrs; ++i) inArrs[i]->tickReadDevice(); output.tickWriteDevice(); return; } // const bool isZcontin = output.strideAt(axis) == 1; // bool areInputsContin = true; // bool allSameOrder = true; // std::vector<Nd4jLong> strideOfContigStride(numOfInArrs); // if(isZcontin) { // for (uint i = 0; i < inArrs.size(); ++i) { // areInputsContin &= inArrs[i]->strideAt(axis) == 1; // allSameOrder &= output.ordering() == inArrs[i]->ordering(); // if(!areInputsContin || !allSameOrder) // break; // strideOfContigStride[i] = shape::strideOverContigAxis(axis, inArrs[i]->shapeInfo()); // } // } // const bool luckCase2 = isZcontin && areInputsContin && allSameOrder; // if(luckCase2) { // for example {2,1,3} + {2,5,3} + {2,10,3} = {2,16,3}, here axis 1 shoud have stride = 1 for all inputs arrays and output array // const auto zStep = shape::strideOverContigAxis(axis, output.shapeInfo()); // for (uint i = 0; i < output.lengthOf() / output.sizeAt(axis); ++i) { // const auto iShift = i * sizeofT; // void* z = static_cast<int8_t*>(output.specialBuffer()) + zStep * iShift; // for (uint j = 0; j < numOfInArrs; ++j) { // const auto xDim = inArrs[j]->sizeAt(axis); // void* x = static_cast<int8_t*>(inArrs[j]->specialBuffer()) + strideOfContigStride[j] * iShift; // const auto memSizeToCopy = xDim * sizeofT; // hipMemcpyAsync(z, x, memSizeToCopy, hipMemcpyDeviceToDevice, *context->getCudaStream()); // z = static_cast<int8_t*>(z) + memSizeToCopy; // } // } // if(hipStreamSynchronize(*context->getCudaStream()) != 0) // throw std::runtime_error("concat cuda: luckCase2 failed!"); // } // else { // general (slower) case const int threadsPerBlock = 256; const int blocksPerGrid = 512; const int sharedMem = 512; // prepare arrays of pointers on buffers and shapes std::vector<const void*> hInBuffers(numOfInArrs); std::vector<const Nd4jLong*> hInShapeInfo(numOfInArrs); for(int i = 0; i < numOfInArrs; ++i) { hInBuffers[i] = inArrs[i]->specialBuffer(); hInShapeInfo[i] = inArrs[i]->specialShapeInfo(); } PointersManager manager(context, "helpers::concat"); void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*)); BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), dInBuffers, dInShapeInfo, output.specialBuffer(), output.specialShapeInfo(), axis), LIBND4J_TYPES); manager.synchronize(); // } NDArray::registerSpecialUse({&output}, inArrs); } } } }
615a91851080826537bd84fd2ff2bbfd9cd211ee.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void concatCuda(void* pVx, void* pxShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen, totalThreads; __shared__ int rank; if (threadIdx.x == 0) { zLen = shape::length(zShapeInfo); rank = shape::rank(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int coords[MAX_RANK]; for (uint64_t i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int inArrIdx = 0; Nd4jLong *xShapeInfo = reinterpret_cast<Nd4jLong **>(pxShapeInfo)[inArrIdx]; while (coords[axis] >= xShapeInfo[axis + 1]) { coords[axis] -= xShapeInfo[axis + 1]; xShapeInfo = reinterpret_cast<Nd4jLong **>(pxShapeInfo)[++inArrIdx]; } const auto *x = reinterpret_cast<T *>(reinterpret_cast<void **>(pVx)[inArrIdx]); const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void concatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { concatCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(pVx, pxShapeInfo, vz, zShapeInfo, axis); } ////////////////////////////////////////////////////////////////////////// void concat(sd::LaunchContext * context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int axis) { const int numOfInArrs = inArrs.size(); const auto sizeofT = output.sizeOfT(); NDArray::prepareSpecialUse({&output}, inArrs); bool luckCase1 = ((axis == 0 && output.ordering() == 'c') || (axis == output.rankOf() - 1 && output.ordering() == 'f')) && output.ews() == 1; if(luckCase1) { for (uint i = 0; i < numOfInArrs; ++i) { luckCase1 &= inArrs[i]->ordering() == output.ordering() && inArrs[i]->ews() == 1; if(!luckCase1) break; } } if(luckCase1) { // for example {1,10} + {2,10} + {3,10} = {6, 10} order c; or {10,1} + {10,2} + {10,3} = {10, 6} order f void* z = static_cast<int8_t*>(output.specialBuffer()); for (uint i = 0; i < numOfInArrs; ++i) { 
const auto memAmountToCopy = inArrs[i]->lengthOf() * sizeofT; cudaMemcpyAsync(z, reinterpret_cast<const int8_t*>(inArrs[i]->specialBuffer()), memAmountToCopy, cudaMemcpyDeviceToDevice, *context->getCudaStream()); z = static_cast<int8_t*>(z) + memAmountToCopy; } if(cudaStreamSynchronize(*context->getCudaStream()) != 0) throw std::runtime_error("concat cuda: luckCase1 failed!"); for(int i = 0; i < numOfInArrs; ++i) inArrs[i]->tickReadDevice(); output.tickWriteDevice(); return; } // const bool isZcontin = output.strideAt(axis) == 1; // bool areInputsContin = true; // bool allSameOrder = true; // std::vector<Nd4jLong> strideOfContigStride(numOfInArrs); // if(isZcontin) { // for (uint i = 0; i < inArrs.size(); ++i) { // areInputsContin &= inArrs[i]->strideAt(axis) == 1; // allSameOrder &= output.ordering() == inArrs[i]->ordering(); // if(!areInputsContin || !allSameOrder) // break; // strideOfContigStride[i] = shape::strideOverContigAxis(axis, inArrs[i]->shapeInfo()); // } // } // const bool luckCase2 = isZcontin && areInputsContin && allSameOrder; // if(luckCase2) { // for example {2,1,3} + {2,5,3} + {2,10,3} = {2,16,3}, here axis 1 shoud have stride = 1 for all inputs arrays and output array // const auto zStep = shape::strideOverContigAxis(axis, output.shapeInfo()); // for (uint i = 0; i < output.lengthOf() / output.sizeAt(axis); ++i) { // const auto iShift = i * sizeofT; // void* z = static_cast<int8_t*>(output.specialBuffer()) + zStep * iShift; // for (uint j = 0; j < numOfInArrs; ++j) { // const auto xDim = inArrs[j]->sizeAt(axis); // void* x = static_cast<int8_t*>(inArrs[j]->specialBuffer()) + strideOfContigStride[j] * iShift; // const auto memSizeToCopy = xDim * sizeofT; // cudaMemcpyAsync(z, x, memSizeToCopy, cudaMemcpyDeviceToDevice, *context->getCudaStream()); // z = static_cast<int8_t*>(z) + memSizeToCopy; // } // } // if(cudaStreamSynchronize(*context->getCudaStream()) != 0) // throw std::runtime_error("concat cuda: luckCase2 failed!"); // } // else { // general (slower) case const int threadsPerBlock = 256; const int blocksPerGrid = 512; const int sharedMem = 512; // prepare arrays of pointers on buffers and shapes std::vector<const void*> hInBuffers(numOfInArrs); std::vector<const Nd4jLong*> hInShapeInfo(numOfInArrs); for(int i = 0; i < numOfInArrs; ++i) { hInBuffers[i] = inArrs[i]->specialBuffer(); hInShapeInfo[i] = inArrs[i]->specialShapeInfo(); } PointersManager manager(context, "helpers::concat"); void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*)); BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), dInBuffers, dInShapeInfo, output.specialBuffer(), output.specialShapeInfo(), axis), LIBND4J_TYPES); manager.synchronize(); // } NDArray::registerSpecialUse({&output}, inArrs); } } } }
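The concat pair above illustrates the one mechanical rewrite hipify applies throughout this collection: CUDA's triple-chevron kernel launch becomes a hipLaunchKernelGGL call carrying the same grid, block, shared-memory and stream arguments. A minimal sketch of that translation, using a hypothetical scale kernel and launch_scale wrapper that are not part of the file above:

#include <hip/hip_runtime.h>

__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per element
    if (i < n) x[i] *= a;
}

// CUDA source would write:  scale<<<blocks, threads, 0, stream>>>(d_x, 2.0f, n);
// hipify rewrites that launch into the equivalent call below.
void launch_scale(float* d_x, int n, hipStream_t stream) {
    const int threads = 256;
    const int blocks  = (n + threads - 1) / threads; // round up so every element is covered
    hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, stream, d_x, 2.0f, n);
}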
3233b64463dd4c5544230a0df3a5d0a90ddb43b9.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_complex.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __global__ void multiply_const_kernel(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex k, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { // Euler's formula: e^(ix) = cos(x) + i*sin(x) out[i] = cuCmulf(in[i], k); } } void exec_multiply_const(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex k, int n, int grid_size, int block_size, hipStream_t stream) { hipLaunchKernelGGL(( multiply_const_kernel), dim3(grid_size), dim3(block_size), 0, stream, in, out, k, n); } void get_block_and_grid_multiply_const(int *minGrid, int *minBlock) { hipOccupancyMaxPotentialBlockSize(minGrid, minBlock, multiply_const_kernel, 0, 0); }
3233b64463dd4c5544230a0df3a5d0a90ddb43b9.cu
#include <cuComplex.h> #include <cuda.h> #include <cuda_runtime.h> __global__ void multiply_const_kernel(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex k, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { // Euler's formula: e^(ix) = cos(x) + i*sin(x) out[i] = cuCmulf(in[i], k); } } void exec_multiply_const(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex k, int n, int grid_size, int block_size, cudaStream_t stream) { multiply_const_kernel<<<grid_size, block_size, 0, stream>>>(in, out, k, n); } void get_block_and_grid_multiply_const(int *minGrid, int *minBlock) { cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, multiply_const_kernel, 0, 0); }
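A possible host-side driver for the two helpers above, written against the CUDA variant; the buffer names, element count and the 0.5+0.5i constant are illustrative only, and the sketch assumes it is compiled and linked together with the file above.

#include <cuComplex.h>
#include <cuda_runtime.h>
#include <vector>

// Declarations of the helpers defined in the file above.
void exec_multiply_const(cuFloatComplex* in, cuFloatComplex* out, cuFloatComplex k,
                         int n, int grid_size, int block_size, cudaStream_t stream);
void get_block_and_grid_multiply_const(int* minGrid, int* minBlock);

int main() {
    const int n = 1 << 20;
    std::vector<cuFloatComplex> host(n, make_cuFloatComplex(1.0f, 0.0f));

    cuFloatComplex *d_in = nullptr, *d_out = nullptr;
    cudaMalloc((void**)&d_in,  n * sizeof(cuFloatComplex));
    cudaMalloc((void**)&d_out, n * sizeof(cuFloatComplex));
    cudaMemcpy(d_in, host.data(), n * sizeof(cuFloatComplex), cudaMemcpyHostToDevice);

    int min_grid = 0, block_size = 0;
    get_block_and_grid_multiply_const(&min_grid, &block_size);   // occupancy-suggested block size
    const int grid_size = (n + block_size - 1) / block_size;     // enough blocks to cover n elements

    // Multiply every sample by 0.5 + 0.5i on the default stream.
    exec_multiply_const(d_in, d_out, make_cuFloatComplex(0.5f, 0.5f), n, grid_size, block_size, 0);
    cudaDeviceSynchronize();

    cudaMemcpy(host.data(), d_out, n * sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}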
86df70d21af779d91f7d8135be3586fd1cbe4cd8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2013-2014, Gregory P. Meyer University of Illinois Board of Trustees All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <dip/common/error.h> #include <dip/common/types.h> #define FILTER_HALF_WIDTH 3 #define BLOCK_WIDTH 16 namespace dip { __global__ void BilateralFilter(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Allocate Shared Memory __shared__ Depth ds[BLOCK_WIDTH][BLOCK_WIDTH]; // Get Block and Thread Id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Calculate Row & Column int col = tx + bx * BLOCK_WIDTH; int row = ty + by * BLOCK_WIDTH; // Cooperative Load of the Tile if ((col < width) && (row < height)) { ds[ty][tx] = depth[col + row * width]; } else { ds[ty][tx] = 0; } // Sync Threads in Block __syncthreads(); // Perform the Bilateral Filter if ((col < width) && (row < height)) { float center_depth = ds[ty][tx]; float h = 0.0f, k = 0.0f; if (center_depth > 0) { for (int dy = -FILTER_HALF_WIDTH; dy <= FILTER_HALF_WIDTH; dy++) { for (int dx = -FILTER_HALF_WIDTH; dx <= FILTER_HALF_WIDTH; dx++) { int x = col + dx; int y = row + dy; if ((x >= 0) && (x < width) && (y >= 0) && (y < height)) { int i = tx + dx; int j = ty + dy; float current_depth; if ((i >= 0) && (i < BLOCK_WIDTH) && (j >= 0) && (j < BLOCK_WIDTH)) current_depth = ds[j][i]; else current_depth = depth[x + y * width]; if (current_depth > 0) { float d = static_cast<float>((dx * dx) + (dy * dy)); float r = static_cast<float>((current_depth - center_depth) * (current_depth - center_depth)); float weight = __expf(-0.5f * (d * sigma_d + r * sigma_r)); h += current_depth * weight; k += weight; } } } } } if (k > 0.0f) filtered_depth[col + row * width] = h / k; else filtered_depth[col + row * width] = 0; } } void BilateralKernel(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Launch Bilateral Filter Kernel int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; int grid_height = (height + (BLOCK_WIDTH 
- 1)) / BLOCK_WIDTH; dim3 grid_dim(grid_width, grid_height, 1); dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1); hipLaunchKernelGGL(( BilateralFilter), dim3(grid_dim), dim3(block_dim), 0, 0, sigma_d, sigma_r, width, height, depth, filtered_depth); CUDA_ERROR_CHECK(hipDeviceSynchronize()); } } // namespace dip
86df70d21af779d91f7d8135be3586fd1cbe4cd8.cu
/* Copyright (c) 2013-2014, Gregory P. Meyer University of Illinois Board of Trustees All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <dip/common/error.h> #include <dip/common/types.h> #define FILTER_HALF_WIDTH 3 #define BLOCK_WIDTH 16 namespace dip { __global__ void BilateralFilter(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Allocate Shared Memory __shared__ Depth ds[BLOCK_WIDTH][BLOCK_WIDTH]; // Get Block and Thread Id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Calculate Row & Column int col = tx + bx * BLOCK_WIDTH; int row = ty + by * BLOCK_WIDTH; // Cooperative Load of the Tile if ((col < width) && (row < height)) { ds[ty][tx] = depth[col + row * width]; } else { ds[ty][tx] = 0; } // Sync Threads in Block __syncthreads(); // Perform the Bilateral Filter if ((col < width) && (row < height)) { float center_depth = ds[ty][tx]; float h = 0.0f, k = 0.0f; if (center_depth > 0) { for (int dy = -FILTER_HALF_WIDTH; dy <= FILTER_HALF_WIDTH; dy++) { for (int dx = -FILTER_HALF_WIDTH; dx <= FILTER_HALF_WIDTH; dx++) { int x = col + dx; int y = row + dy; if ((x >= 0) && (x < width) && (y >= 0) && (y < height)) { int i = tx + dx; int j = ty + dy; float current_depth; if ((i >= 0) && (i < BLOCK_WIDTH) && (j >= 0) && (j < BLOCK_WIDTH)) current_depth = ds[j][i]; else current_depth = depth[x + y * width]; if (current_depth > 0) { float d = static_cast<float>((dx * dx) + (dy * dy)); float r = static_cast<float>((current_depth - center_depth) * (current_depth - center_depth)); float weight = __expf(-0.5f * (d * sigma_d + r * sigma_r)); h += current_depth * weight; k += weight; } } } } } if (k > 0.0f) filtered_depth[col + row * width] = h / k; else filtered_depth[col + row * width] = 0; } } void BilateralKernel(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Launch Bilateral Filter Kernel int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; dim3 grid_dim(grid_width, grid_height, 1); dim3 
block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1); BilateralFilter<<<grid_dim, block_dim>>>(sigma_d, sigma_r, width, height, depth, filtered_depth); CUDA_ERROR_CHECK(cudaDeviceSynchronize()); } } // namespace dip
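One detail worth noting in the bilateral-filter pair above: the weight is __expf(-0.5f * (d * sigma_d + r * sigma_r)), where d is the squared pixel distance and r the squared depth difference, so sigma_d and sigma_r act as inverse variances rather than the usual standard deviations. A hedged convenience wrapper that lets callers pass conventional sigmas; BilateralKernelWithSigmas is an illustrative name, not part of the library:

#include <dip/common/types.h>

namespace dip {

// Defined in the file above.
void BilateralKernel(float sigma_d, float sigma_r, int width, int height,
                     const Depth* depth, Depth* filtered_depth);

// exp(-(d/(2*s_spatial^2) + r/(2*s_range^2))) == exp(-0.5*(d*(1/s_spatial^2) + r*(1/s_range^2))),
// so converting standard deviations to inverse variances reproduces the kernel's weighting.
void BilateralKernelWithSigmas(float sigma_spatial, float sigma_range,
                               int width, int height,
                               const Depth* depth, Depth* filtered_depth) {
  const float inv_var_spatial = 1.0f / (sigma_spatial * sigma_spatial);
  const float inv_var_range   = 1.0f / (sigma_range * sigma_range);
  BilateralKernel(inv_var_spatial, inv_var_range, width, height, depth, filtered_depth);
}

}  // namespace dip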
15cc3e48a0fca640cb442daa2bc1cbd948284d87.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> int main() { int runtime; int driver; hipError_t runtime_err = hipRuntimeGetVersion(&runtime); hipError_t driver_err = hipDriverGetVersion(&driver); printf("Runtime Version: %d. %s\n", runtime, hipGetErrorString(runtime_err)); printf("Driver Version: %d. %s\n", driver, hipGetErrorString(driver_err)); }
15cc3e48a0fca640cb442daa2bc1cbd948284d87.cu
#include <stdio.h> int main() { int runtime; int driver; cudaError_t runtime_err = cudaRuntimeGetVersion(&runtime); cudaError_t driver_err = cudaDriverGetVersion(&driver); printf("Runtime Version: %d. %s\n", runtime, cudaGetErrorString(runtime_err)); printf("Driver Version: %d. %s\n", driver, cudaGetErrorString(driver_err)); }
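For reference, the integer filled in by cudaRuntimeGetVersion and cudaDriverGetVersion packs the version as 1000*major + 10*minor (for example 11020 means 11.2). HIP encodes its runtime version differently, so the small decoder below is a CUDA-only sketch; print_cuda_version is an illustrative helper name.

#include <stdio.h>
#include <cuda_runtime.h>

// Split CUDA's packed version integer (1000*major + 10*minor) into readable parts.
static void print_cuda_version(const char* label, int packed) {
    printf("%s: %d.%d\n", label, packed / 1000, (packed % 1000) / 10);
}

int main() {
    int runtime = 0, driver = 0;
    cudaRuntimeGetVersion(&runtime);
    cudaDriverGetVersion(&driver);
    print_cuda_version("Runtime", runtime);
    print_cuda_version("Driver", driver);
    return 0;
}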
9560dc11a97787c434be4615b74a91fc02201562.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Accelerated Computing for Deep Learning #include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include "check.h" #define threads_per_block 256 // has to be the multiple of 32, the larger the number(such as 512,1024...) #define SOFTENING 1e-9f //Average Billion Interactions per second will be small /* * Each body contains x, y, and z coordinate positions, * as well as velocities in the x, y, and z directions. */ typedef struct { float x, y, z, vx, vy, vz; } Body; /* * Do not modify this function. A constraint of this exercise is * that it remain a host function. */ void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } /* * This function calculates the gravitational impact of all bodies in the system * on all others, but does not update their positions. */ __global__ //Indicates that following function will run on the GPU, and can be invoked globally void bodyForce(Body *p, float dt, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; //increased parallelization if (i < n) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; float invDist = rsqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt*Fx; p[i].vy += dt*Fy; p[i].vz += dt*Fz; } } int main(const int argc, const char** argv) { /* * Do not change the value for `nBodies` here. If you would like to modify it, * pass values into the command line. */ int nBodies = 2<<15; int salt = 0; if (argc > 1) nBodies = 2<<atoi(argv[1]); /* * This salt is for assessment reasons. Tampering with it will result in automatic failure. */ if (argc > 2) salt = atoi(argv[2]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies*sizeof(Body);//Get buf in GPU float *buf; buf = (float*)malloc(bytes); Body *p = (Body*)buf;//Find body for buf float *buf_GPU; hipMallocManaged(&buf_GPU, bytes);//Allocate memory, and obtain a pointer that can be referenced in both host and device code Body *p1 = (Body*)buf_GPU;//Find body for buf_GPU /* * As a constraint of this exercise, `randomizeBodies` must remain a host function. */ randomizeBodies(buf, 6*nBodies); // Init pos / vel data size_t number_of_blocks = (nBodies + threads_per_block - 1) / threads_per_block;//Ensure there are at least `N` threads in the grid //but only 1 block's worth extra double totalTime = 0.0; /* * This simulation will run for 10 cycles of time, calculating gravitational * interaction amongst bodies, and adjusting their positions to reflect. */ /*******************************************************************/ // Do not modify these 2 lines of code. for (int iter = 0; iter < nIters; iter++) { StartTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ hipMemcpy(buf_GPU, buf, bytes, hipMemcpyHostToDevice);//`hipMemcpy` is to copy data from host to device //<<< ... 
>>>syntax just prior to passing the kernel any expected arguments hipLaunchKernelGGL(( bodyForce), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, p1, dt, nBodies); // compute interbody forces hipMemcpy(buf, buf_GPU, bytes, hipMemcpyDeviceToHost);//`hipMemcpy` is to copy data from device to host /* * This position integration cannot occur until this round of `bodyForce` has completed. * Also, the next round of `bodyForce` cannot begin until the integration is complete. */ for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } /*******************************************************************/ // Do not modify the code in this section. const double tElapsed = GetTimer() / 1000.0; totalTime += tElapsed; } double avgTime = totalTime / (double)(nIters); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; #ifdef ASSESS checkPerformance(buf, billionsOfOpsPerSecond, salt); #else checkAccuracy(buf, nBodies); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); salt += 1; #endif /*******************************************************************/ /* * Feel free to modify code below. */ free(buf); hipFree(buf_GPU);//Use `hipFree` to free memory allocated with `hipMallocManaged` }
9560dc11a97787c434be4615b74a91fc02201562.cu
//Accelerated Computing for Deep Learning #include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include "check.h" #define threads_per_block 256 // has to be the multiple of 32, the larger the number(such as 512,1024...) #define SOFTENING 1e-9f //Average Billion Interactions per second will be small /* * Each body contains x, y, and z coordinate positions, * as well as velocities in the x, y, and z directions. */ typedef struct { float x, y, z, vx, vy, vz; } Body; /* * Do not modify this function. A constraint of this exercise is * that it remain a host function. */ void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } /* * This function calculates the gravitational impact of all bodies in the system * on all others, but does not update their positions. */ __global__ //Indicates that following function will run on the GPU, and can be invoked globally void bodyForce(Body *p, float dt, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; //increased parallelization if (i < n) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; float invDist = rsqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt*Fx; p[i].vy += dt*Fy; p[i].vz += dt*Fz; } } int main(const int argc, const char** argv) { /* * Do not change the value for `nBodies` here. If you would like to modify it, * pass values into the command line. */ int nBodies = 2<<15; int salt = 0; if (argc > 1) nBodies = 2<<atoi(argv[1]); /* * This salt is for assessment reasons. Tampering with it will result in automatic failure. */ if (argc > 2) salt = atoi(argv[2]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies*sizeof(Body);//Get buf in GPU float *buf; buf = (float*)malloc(bytes); Body *p = (Body*)buf;//Find body for buf float *buf_GPU; cudaMallocManaged(&buf_GPU, bytes);//Allocate memory, and obtain a pointer that can be referenced in both host and device code Body *p1 = (Body*)buf_GPU;//Find body for buf_GPU /* * As a constraint of this exercise, `randomizeBodies` must remain a host function. */ randomizeBodies(buf, 6*nBodies); // Init pos / vel data size_t number_of_blocks = (nBodies + threads_per_block - 1) / threads_per_block;//Ensure there are at least `N` threads in the grid //but only 1 block's worth extra double totalTime = 0.0; /* * This simulation will run for 10 cycles of time, calculating gravitational * interaction amongst bodies, and adjusting their positions to reflect. */ /*******************************************************************/ // Do not modify these 2 lines of code. for (int iter = 0; iter < nIters; iter++) { StartTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ cudaMemcpy(buf_GPU, buf, bytes, cudaMemcpyHostToDevice);//`cudaMemcpy` is to copy data from host to device //<<< ... 
>>> syntax just prior to passing the kernel any expected arguments bodyForce<<<number_of_blocks, threads_per_block>>>(p1, dt, nBodies); // compute interbody forces cudaMemcpy(buf, buf_GPU, bytes, cudaMemcpyDeviceToHost);//`cudaMemcpy` is to copy data from device to host /* * This position integration cannot occur until this round of `bodyForce` has completed. * Also, the next round of `bodyForce` cannot begin until the integration is complete. */ for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } /*******************************************************************/ // Do not modify the code in this section. const double tElapsed = GetTimer() / 1000.0; totalTime += tElapsed; } double avgTime = totalTime / (double)(nIters); float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; #ifdef ASSESS checkPerformance(buf, billionsOfOpsPerSecond, salt); #else checkAccuracy(buf, nBodies); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); salt += 1; #endif /*******************************************************************/ /* * Feel free to modify code below. */ free(buf); cudaFree(buf_GPU);//Use `cudaFree` to free memory allocated with `cudaMallocManaged` }
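Because buf_GPU above comes from cudaMallocManaged, the separate host malloc buffer and the two cudaMemcpy calls per iteration are optional: managed memory is visible to both host and device, provided the host synchronizes before touching it. A sketch of one simplified iteration under that assumption, following the CUDA variant; step_simulation is an illustrative name, while Body and bodyForce are the definitions from the file above:

#include <cuda_runtime.h>

// Assumes the Body struct and bodyForce kernel defined above, and that p points
// to memory obtained from cudaMallocManaged so the host may read and write it directly.
void step_simulation(Body* p, float dt, int nBodies, size_t blocks, size_t threads) {
    bodyForce<<<blocks, threads>>>(p, dt, nBodies);   // device pass: accumulate velocities
    cudaDeviceSynchronize();                          // host must wait before touching managed memory
    for (int i = 0; i < nBodies; i++) {               // host pass: integrate positions in place
        p[i].x += p[i].vx * dt;
        p[i].y += p[i].vy * dt;
        p[i].z += p[i].vz * dt;
    }
}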
3de2e7a72f4e13943f40c9883ee5063ad78ffbef.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <stdio.h> #include <stdlib.h> #include "cuMatlab.h" #include "vertex.h" #include "particle.h" using namespace std; int main(int argc, char **argv){ //waveExample(1024); //auto f=[] __device__ (double x){return sinpi(x);}; //poisson(f, -1, 1, 32); dim3 mesh(1<<6, 1<<5, 1<<11); vertex(argc, argv, mesh, particleShadder); printf("Program terminated.\n"); return 0; }
3de2e7a72f4e13943f40c9883ee5063ad78ffbef.cu
#include <cuda.h> #include <curand.h> #include <stdio.h> #include <stdlib.h> #include "cuMatlab.h" #include "vertex.h" #include "particle.h" using namespace std; int main(int argc, char **argv){ //waveExample(1024); //auto f=[] __device__ (double x){return sinpi(x);}; //poisson(f, -1, 1, 32); dim3 mesh(1<<6, 1<<5, 1<<11); vertex(argc, argv, mesh, particleShadder); printf("Program terminated.\n"); return 0; }
73ba8069fd2c0bc233010a73b65d7b5c7cae266b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/unique.h> #include <thrust/execution_policy.h> template<typename ExecutionPolicy, typename Iterator1, typename Iterator2> __global__ void unique_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result) { *result = thrust::unique(exec, first, last); } template<typename ExecutionPolicy, typename Iterator1, typename BinaryPredicate, typename Iterator2> __global__ void unique_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result) { *result = thrust::unique(exec, first, last, pred); } template<typename T> struct is_equal_div_10_unique { __host__ __device__ bool operator()(const T x, const T& y) const { return ((int) x / 10) == ((int) y / 10); } }; template<typename ExecutionPolicy> void TestUniqueDevice(ExecutionPolicy exec) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; hipLaunchKernelGGL(( unique_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), new_last_vec.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - data.begin(), 7); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 12); ASSERT_EQUAL(data[2], 20); ASSERT_EQUAL(data[3], 29); ASSERT_EQUAL(data[4], 21); ASSERT_EQUAL(data[5], 31); ASSERT_EQUAL(data[6], 37); hipLaunchKernelGGL(( unique_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), new_last, is_equal_div_10_unique<T>(), new_last_vec.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); } void TestUniqueDeviceSeq() { TestUniqueDevice(thrust::seq); } DECLARE_UNITTEST(TestUniqueDeviceSeq); void TestUniqueDeviceDevice() { TestUniqueDevice(thrust::device); } DECLARE_UNITTEST(TestUniqueDeviceDevice); void TestUniqueDeviceNoSync() { TestUniqueDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestUniqueDeviceNoSync); template<typename ExecutionPolicy> void TestUniqueCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; hipStream_t s; hipStreamCreate(&s); auto streampolicy = policy.on(s); new_last = thrust::unique(streampolicy, data.begin(), data.end()); hipStreamSynchronize(s); ASSERT_EQUAL(new_last - data.begin(), 7); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 12); ASSERT_EQUAL(data[2], 20); ASSERT_EQUAL(data[3], 29); ASSERT_EQUAL(data[4], 21); ASSERT_EQUAL(data[5], 31); ASSERT_EQUAL(data[6], 37); new_last = thrust::unique(streampolicy, data.begin(), new_last, is_equal_div_10_unique<T>()); hipStreamSynchronize(s); ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); hipStreamDestroy(s); } void TestUniqueCudaStreamsSync() { TestUniqueCudaStreams(thrust::hip::par); } 
DECLARE_UNITTEST(TestUniqueCudaStreamsSync); void TestUniqueCudaStreamsNoSync() { TestUniqueCudaStreams(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestUniqueCudaStreamsNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3> __global__ void unique_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result1, Iterator3 result2) { *result2 = thrust::unique_copy(exec, first, last, result1); } template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename BinaryPredicate, typename Iterator3> __global__ void unique_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result1, BinaryPredicate pred, Iterator3 result2) { *result2 = thrust::unique_copy(exec, first, last, result1, pred); } template<typename ExecutionPolicy> void TestUniqueCopyDevice(ExecutionPolicy exec) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; Vector output(10, -1); thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; hipLaunchKernelGGL(( unique_copy_kernel), dim3(1),dim3(1), 0, 0, exec, data.begin(), data.end(), output.begin(), new_last_vec.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - output.begin(), 7); ASSERT_EQUAL(output[0], 11); ASSERT_EQUAL(output[1], 12); ASSERT_EQUAL(output[2], 20); ASSERT_EQUAL(output[3], 29); ASSERT_EQUAL(output[4], 21); ASSERT_EQUAL(output[5], 31); ASSERT_EQUAL(output[6], 37); hipLaunchKernelGGL(( unique_copy_kernel), dim3(1),dim3(1), 0, 0, exec, output.begin(), new_last, data.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin()); { hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); } void TestUniqueCopyDeviceSeq() { TestUniqueCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestUniqueCopyDeviceSeq); void TestUniqueCopyDeviceDevice() { TestUniqueCopyDevice(thrust::device); } DECLARE_UNITTEST(TestUniqueCopyDeviceDevice); void TestUniqueCopyDeviceNoSync() { TestUniqueCopyDevice(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestUniqueCopyDeviceNoSync); template<typename ExecutionPolicy> void TestUniqueCopyCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; Vector output(10, -1); thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; hipStream_t s; hipStreamCreate(&s); auto streampolicy = policy.on(s); new_last = thrust::unique_copy(streampolicy, data.begin(), data.end(), output.begin()); hipStreamSynchronize(s); ASSERT_EQUAL(new_last - output.begin(), 7); ASSERT_EQUAL(output[0], 11); ASSERT_EQUAL(output[1], 12); ASSERT_EQUAL(output[2], 20); ASSERT_EQUAL(output[3], 29); ASSERT_EQUAL(output[4], 21); ASSERT_EQUAL(output[5], 31); ASSERT_EQUAL(output[6], 37); new_last = thrust::unique_copy(streampolicy, output.begin(), new_last, data.begin(), is_equal_div_10_unique<T>()); hipStreamSynchronize(s); ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); 
ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); hipStreamDestroy(s); } void TestUniqueCopyCudaStreamsSync() { TestUniqueCopyCudaStreams(thrust::hip::par); } DECLARE_UNITTEST(TestUniqueCopyCudaStreamsSync); void TestUniqueCopyCudaStreamsNoSync() { TestUniqueCopyCudaStreams(thrust::hip::par_nosync); } DECLARE_UNITTEST(TestUniqueCopyCudaStreamsNoSync);
73ba8069fd2c0bc233010a73b65d7b5c7cae266b.cu
#include <unittest/unittest.h> #include <thrust/unique.h> #include <thrust/execution_policy.h> template<typename ExecutionPolicy, typename Iterator1, typename Iterator2> __global__ void unique_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result) { *result = thrust::unique(exec, first, last); } template<typename ExecutionPolicy, typename Iterator1, typename BinaryPredicate, typename Iterator2> __global__ void unique_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result) { *result = thrust::unique(exec, first, last, pred); } template<typename T> struct is_equal_div_10_unique { __host__ __device__ bool operator()(const T x, const T& y) const { return ((int) x / 10) == ((int) y / 10); } }; template<typename ExecutionPolicy> void TestUniqueDevice(ExecutionPolicy exec) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; unique_kernel<<<1,1>>>(exec, data.begin(), data.end(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - data.begin(), 7); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 12); ASSERT_EQUAL(data[2], 20); ASSERT_EQUAL(data[3], 29); ASSERT_EQUAL(data[4], 21); ASSERT_EQUAL(data[5], 31); ASSERT_EQUAL(data[6], 37); unique_kernel<<<1,1>>>(exec, data.begin(), new_last, is_equal_div_10_unique<T>(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); } void TestUniqueDeviceSeq() { TestUniqueDevice(thrust::seq); } DECLARE_UNITTEST(TestUniqueDeviceSeq); void TestUniqueDeviceDevice() { TestUniqueDevice(thrust::device); } DECLARE_UNITTEST(TestUniqueDeviceDevice); void TestUniqueDeviceNoSync() { TestUniqueDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestUniqueDeviceNoSync); template<typename ExecutionPolicy> void TestUniqueCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; cudaStream_t s; cudaStreamCreate(&s); auto streampolicy = policy.on(s); new_last = thrust::unique(streampolicy, data.begin(), data.end()); cudaStreamSynchronize(s); ASSERT_EQUAL(new_last - data.begin(), 7); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 12); ASSERT_EQUAL(data[2], 20); ASSERT_EQUAL(data[3], 29); ASSERT_EQUAL(data[4], 21); ASSERT_EQUAL(data[5], 31); ASSERT_EQUAL(data[6], 37); new_last = thrust::unique(streampolicy, data.begin(), new_last, is_equal_div_10_unique<T>()); cudaStreamSynchronize(s); ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); cudaStreamDestroy(s); } void TestUniqueCudaStreamsSync() { TestUniqueCudaStreams(thrust::cuda::par); } DECLARE_UNITTEST(TestUniqueCudaStreamsSync); void TestUniqueCudaStreamsNoSync() { TestUniqueCudaStreams(thrust::cuda::par_nosync); } 
DECLARE_UNITTEST(TestUniqueCudaStreamsNoSync); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3> __global__ void unique_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result1, Iterator3 result2) { *result2 = thrust::unique_copy(exec, first, last, result1); } template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename BinaryPredicate, typename Iterator3> __global__ void unique_copy_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 result1, BinaryPredicate pred, Iterator3 result2) { *result2 = thrust::unique_copy(exec, first, last, result1, pred); } template<typename ExecutionPolicy> void TestUniqueCopyDevice(ExecutionPolicy exec) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; Vector output(10, -1); thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; unique_copy_kernel<<<1,1>>>(exec, data.begin(), data.end(), output.begin(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - output.begin(), 7); ASSERT_EQUAL(output[0], 11); ASSERT_EQUAL(output[1], 12); ASSERT_EQUAL(output[2], 20); ASSERT_EQUAL(output[3], 29); ASSERT_EQUAL(output[4], 21); ASSERT_EQUAL(output[5], 31); ASSERT_EQUAL(output[6], 37); unique_copy_kernel<<<1,1>>>(exec, output.begin(), new_last, data.begin(), is_equal_div_10_unique<T>(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); } void TestUniqueCopyDeviceSeq() { TestUniqueCopyDevice(thrust::seq); } DECLARE_UNITTEST(TestUniqueCopyDeviceSeq); void TestUniqueCopyDeviceDevice() { TestUniqueCopyDevice(thrust::device); } DECLARE_UNITTEST(TestUniqueCopyDeviceDevice); void TestUniqueCopyDeviceNoSync() { TestUniqueCopyDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestUniqueCopyDeviceNoSync); template<typename ExecutionPolicy> void TestUniqueCopyCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector data(10); data[0] = 11; data[1] = 11; data[2] = 12; data[3] = 20; data[4] = 29; data[5] = 21; data[6] = 21; data[7] = 31; data[8] = 31; data[9] = 37; Vector output(10, -1); thrust::device_vector<Vector::iterator> new_last_vec(1); Vector::iterator new_last; cudaStream_t s; cudaStreamCreate(&s); auto streampolicy = policy.on(s); new_last = thrust::unique_copy(streampolicy, data.begin(), data.end(), output.begin()); cudaStreamSynchronize(s); ASSERT_EQUAL(new_last - output.begin(), 7); ASSERT_EQUAL(output[0], 11); ASSERT_EQUAL(output[1], 12); ASSERT_EQUAL(output[2], 20); ASSERT_EQUAL(output[3], 29); ASSERT_EQUAL(output[4], 21); ASSERT_EQUAL(output[5], 31); ASSERT_EQUAL(output[6], 37); new_last = thrust::unique_copy(streampolicy, output.begin(), new_last, data.begin(), is_equal_div_10_unique<T>()); cudaStreamSynchronize(s); ASSERT_EQUAL(new_last - data.begin(), 3); ASSERT_EQUAL(data[0], 11); ASSERT_EQUAL(data[1], 20); ASSERT_EQUAL(data[2], 31); cudaStreamDestroy(s); } void TestUniqueCopyCudaStreamsSync() { TestUniqueCopyCudaStreams(thrust::cuda::par); } 
DECLARE_UNITTEST(TestUniqueCopyCudaStreamsSync); void TestUniqueCopyCudaStreamsNoSync() { TestUniqueCopyCudaStreams(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestUniqueCopyCudaStreamsNoSync);
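Outside the unit-test harness above, the API under test reduces to a few lines; a minimal standalone example using the same input values as the fixtures:

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/unique.h>
#include <thrust/execution_policy.h>
#include <cstdio>

int main() {
    // Same data as the test fixture: thrust::unique collapses *adjacent* duplicates only.
    const int init[] = {11, 11, 12, 20, 29, 21, 21, 31, 31, 37};
    thrust::device_vector<int> data(init, init + 10);

    auto new_end = thrust::unique(thrust::device, data.begin(), data.end());
    data.erase(new_end, data.end());                 // keep the 7 surviving elements

    thrust::host_vector<int> result = data;          // copy back for printing
    for (int v : result) std::printf("%d ", v);      // prints: 11 12 20 29 21 31 37
    std::printf("\n");
    return 0;
}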
6f8bdbbcc4397d577e7a4443c4dd3e45ef2d8466.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** @file * @brief Definition of CudaKirschOperatorFilter class. * * @author Jan Bobek */ #include "edetect.hxx" #include "IImage.hxx" #include "cuda/CudaError.hxx" #include "cuda/CudaKirschOperatorFilter.hxx" /** * @brief CUDA kernel for applying * the Kirsch operator. * * @param[out] ddata * The destination image data. * @param[in] dstride * Size of the row stride in destination data. * @param[in] sdata * The source image data. * @param[in] sstride * Size of the row stride in source data. * @param[in] rows * Number of rows in the image. * @param[in] cols * Number of columns in the image. */ __global__ void applyKirschOperatorKernel( unsigned char* ddata, unsigned int dstride, const unsigned char* sdata, unsigned int sstride, unsigned int rows, unsigned int cols ) { const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; if( !(row < rows && col < cols) ) return; float* const dstp = (float*)(ddata + row * dstride) + col; const unsigned char* const srcp = sdata + row * sstride + col * sizeof(float); const float* const tp = (const float*)(0 < row ? srcp - sstride : srcp); const float* const mp = (const float*)srcp; const float* const bp = (const float*)(row + 1 < rows ? srcp + sstride : srcp); const int li = (0 < col ? -1 : 0); const int ri = (col + 1 < cols ? 1 : 0); float x = 5.0f * (tp[li] + tp[ 0] + tp[ri]) - 3.0f * (mp[li] + mp[ri] + bp[li] + bp[0] + bp[ri]); float a = fabs(x); a = fmaxf( a, fabs( x += 8.0f * (mp[ri] - tp[li]) ) ); a = fmaxf( a, fabs( x += 8.0f * (bp[ri] - tp[ 0]) ) ); a = fmaxf( a, fabs( x += 8.0f * (bp[ 0] - tp[ri]) ) ); a = fmaxf( a, fabs( x += 8.0f * (bp[li] - mp[ri]) ) ); a = fmaxf( a, fabs( x += 8.0f * (mp[li] - bp[ri]) ) ); a = fmaxf( a, fabs( x += 8.0f * (tp[li] - bp[ 0]) ) ); a = fmaxf( a, fabs( x += 8.0f * (tp[ 0] - bp[li]) ) ); *dstp = a; } /*************************************************************************/ /* CudaKirschOperatorFilter */ /*************************************************************************/ void CudaKirschOperatorFilter::applyKirschOperator( IImage& dest, const IImage& src ) { // 32 = warp size, 8 * 32 = 256 threads const dim3 threadsPerBlock(32, 8); const dim3 numBlocks( (src.columns() + threadsPerBlock.x - 1) / threadsPerBlock.x, (src.rows() + threadsPerBlock.y - 1) / threadsPerBlock.y ); hipLaunchKernelGGL(( applyKirschOperatorKernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, dest.data(), dest.stride(), src.data(), src.stride(), src.rows(), src.columns() ); cudaCheckLastError( "CudaKirschOperatorFilter: kernel launch failed" ); cudaMsgCheckError( hipDeviceSynchronize(), "CudaKirschOperatorFilter: kernel run failed" ); }
6f8bdbbcc4397d577e7a4443c4dd3e45ef2d8466.cu
/** @file * @brief Definition of CudaKirschOperatorFilter class. * * @author Jan Bobek */ #include "edetect.hxx" #include "IImage.hxx" #include "cuda/CudaError.hxx" #include "cuda/CudaKirschOperatorFilter.hxx" /** * @brief CUDA kernel for applying * the Kirsch operator. * * @param[out] ddata * The destination image data. * @param[in] dstride * Size of the row stride in destination data. * @param[in] sdata * The source image data. * @param[in] sstride * Size of the row stride in source data. * @param[in] rows * Number of rows in the image. * @param[in] cols * Number of columns in the image. */ __global__ void applyKirschOperatorKernel( unsigned char* ddata, unsigned int dstride, const unsigned char* sdata, unsigned int sstride, unsigned int rows, unsigned int cols ) { const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; if( !(row < rows && col < cols) ) return; float* const dstp = (float*)(ddata + row * dstride) + col; const unsigned char* const srcp = sdata + row * sstride + col * sizeof(float); const float* const tp = (const float*)(0 < row ? srcp - sstride : srcp); const float* const mp = (const float*)srcp; const float* const bp = (const float*)(row + 1 < rows ? srcp + sstride : srcp); const int li = (0 < col ? -1 : 0); const int ri = (col + 1 < cols ? 1 : 0); float x = 5.0f * (tp[li] + tp[ 0] + tp[ri]) - 3.0f * (mp[li] + mp[ri] + bp[li] + bp[0] + bp[ri]); float a = fabs(x); a = fmaxf( a, fabs( x += 8.0f * (mp[ri] - tp[li]) ) ); a = fmaxf( a, fabs( x += 8.0f * (bp[ri] - tp[ 0]) ) ); a = fmaxf( a, fabs( x += 8.0f * (bp[ 0] - tp[ri]) ) ); a = fmaxf( a, fabs( x += 8.0f * (bp[li] - mp[ri]) ) ); a = fmaxf( a, fabs( x += 8.0f * (mp[li] - bp[ri]) ) ); a = fmaxf( a, fabs( x += 8.0f * (tp[li] - bp[ 0]) ) ); a = fmaxf( a, fabs( x += 8.0f * (tp[ 0] - bp[li]) ) ); *dstp = a; } /*************************************************************************/ /* CudaKirschOperatorFilter */ /*************************************************************************/ void CudaKirschOperatorFilter::applyKirschOperator( IImage& dest, const IImage& src ) { // 32 = warp size, 8 * 32 = 256 threads const dim3 threadsPerBlock(32, 8); const dim3 numBlocks( (src.columns() + threadsPerBlock.x - 1) / threadsPerBlock.x, (src.rows() + threadsPerBlock.y - 1) / threadsPerBlock.y ); applyKirschOperatorKernel<<< numBlocks, threadsPerBlock >>>( dest.data(), dest.stride(), src.data(), src.stride(), src.rows(), src.columns() ); cudaCheckLastError( "CudaKirschOperatorFilter: kernel launch failed" ); cudaMsgCheckError( cudaDeviceSynchronize(), "CudaKirschOperatorFilter: kernel run failed" ); }
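The chain of x += 8.0f * (...) updates in the kernel above walks incrementally through the eight Kirsch compass masks: each 45-degree rotation of the 3x3 mask turns one neighbour's coefficient from -3 to 5 and another's from 5 to -3, so the response changes by 8*(gained - lost) and only one fused multiply-add is needed per direction. A plain, unoptimised host-side reference of the same per-pixel response; kirsch_response and the ring layout are illustrative, not part of the library:

#include <algorithm>
#include <cmath>

// Reference Kirsch response for one pixel given its 3x3 neighbourhood n[row][col].
// Each compass mask puts 5 on three consecutive ring neighbours and -3 on the other five.
float kirsch_response(const float n[3][3]) {
    // Ring of the eight neighbours in clockwise order, starting at the top-left corner.
    const float ring[8] = { n[0][0], n[0][1], n[0][2], n[1][2],
                            n[2][2], n[2][1], n[2][0], n[1][0] };
    float best = 0.0f;
    for (int rot = 0; rot < 8; ++rot) {               // one orientation per compass direction
        float x = 0.0f;
        for (int k = 0; k < 8; ++k) {
            const float w = (k < 3) ? 5.0f : -3.0f;   // three 5s, five -3s
            x += w * ring[(k + rot) % 8];
        }
        best = std::max(best, std::fabs(x));
    }
    return best;
}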
2f062333195835f4a05a05a95b03255d35d48303.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/copy_if.cuh> #include <cudf/detail/get_value.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/error.hpp> #include <nvtext/generate_ngrams.hpp> #include <strings/utilities.cuh> #include <thrust/transform_scan.h> namespace nvtext { namespace detail { namespace { /** * @brief Generate ngrams from strings column. * * Adjacent strings are concatented with the provided separator. * The number of adjacent strings join depends on the specified ngrams value. * For example: for bigrams (ngrams=2), pairs of strings are concatenated. */ struct ngram_generator_fn { cudf::column_device_view const d_strings; cudf::size_type ngrams; cudf::string_view const d_separator; int32_t const* d_offsets{}; char* d_chars{}; /** * @brief Build ngram for each string. * * This is called for each thread and processed for each string. * Each string will produce the number of ngrams specified. * * @param idx Index of the kernel thread. * @return Number of bytes required for the string for this thread. */ __device__ cudf::size_type operator()(cudf::size_type idx) { char* out_ptr = d_chars ? 
d_chars + d_offsets[idx] : nullptr; cudf::size_type bytes = 0; for (cudf::size_type n = 0; n < ngrams; ++n) { auto const d_str = d_strings.element<cudf::string_view>(n + idx); bytes += d_str.size_bytes(); if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_str); if ((n + 1) >= ngrams) continue; bytes += d_separator.size_bytes(); if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_separator); } return bytes; } }; } // namespace std::unique_ptr<cudf::column> generate_ngrams( cudf::strings_column_view const& strings, cudf::size_type ngrams = 2, cudf::string_scalar const& separator = cudf::string_scalar{"_"}, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be valid"); cudf::string_view const d_separator(separator.data(), separator.size()); CUDF_EXPECTS(ngrams > 1, "Parameter ngrams should be an integer value of 2 or greater"); auto strings_count = strings.size(); if (strings_count == 0) // if no strings, return an empty column return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); auto execpol = rmm::exec_policy(stream); auto strings_column = cudf::column_device_view::create(strings.parent(), stream); auto d_strings = *strings_column; // first create a new offsets vector removing nulls and empty strings from the input column std::unique_ptr<cudf::column> non_empty_offsets_column = [&] { cudf::column_view offsets_view( cudf::data_type{cudf::type_id::INT32}, strings_count + 1, strings.offsets().data<int32_t>()); auto table_offsets = cudf::detail::copy_if( cudf::table_view({offsets_view}), [d_strings, strings_count] __device__(cudf::size_type idx) { if (idx == strings_count) return true; if (d_strings.is_null(idx)) return false; return !d_strings.element<cudf::string_view>(idx).empty(); }, mr, stream) ->release(); strings_count = table_offsets.front()->size() - 1; return std::move(table_offsets.front()); }(); // this allows freeing the temporary table_offsets CUDF_EXPECTS(strings_count >= ngrams, "Insufficient number of strings to generate ngrams"); // create a temporary column view from the non-empty offsets and chars column views cudf::column_view strings_view(cudf::data_type{cudf::type_id::STRING}, strings_count, nullptr, nullptr, 0, 0, {non_empty_offsets_column->view(), strings.chars()}); strings_column = cudf::column_device_view::create(strings_view, stream); d_strings = *strings_column; // compute the number of strings of ngrams auto const ngrams_count = strings_count - ngrams + 1; // build output offsets by computing the output bytes for each generated ngram auto offsets_transformer_itr = thrust::make_transform_iterator(thrust::make_counting_iterator<cudf::size_type>(0), ngram_generator_fn{d_strings, ngrams, d_separator}); auto offsets_column = cudf::strings::detail::make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + ngrams_count, mr, stream); auto d_offsets = offsets_column->view().data<int32_t>(); // build the chars column // generate the ngrams from the input strings and copy them into the chars data buffer cudf::size_type const total_bytes = thrust::device_pointer_cast(d_offsets)[ngrams_count]; auto chars_column = cudf::strings::detail::create_chars_child_column(ngrams_count, 0, total_bytes, mr, stream); char* const d_chars = chars_column->mutable_view().data<char>(); thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), ngrams_count, ngram_generator_fn{d_strings, 
ngrams, d_separator, d_offsets, d_chars}); chars_column->set_null_count(0); // make the output strings column from the offsets and chars column return cudf::make_strings_column(ngrams_count, std::move(offsets_column), std::move(chars_column), 0, rmm::device_buffer{0, stream, mr}, stream, mr); } } // namespace detail std::unique_ptr<cudf::column> generate_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, cudf::string_scalar const& separator, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::generate_ngrams(strings, ngrams, separator, mr); } namespace detail { namespace { struct character_ngram_generator_fn { cudf::column_device_view const d_strings; cudf::size_type ngrams; int32_t const* d_ngram_offsets{}; int32_t* d_offsets{}; char* d_chars{}; __device__ void operator()(cudf::size_type idx) { if (d_strings.is_null(idx)) return; auto const d_str = d_strings.element<cudf::string_view>(idx); if (d_str.empty()) return; auto itr = d_str.begin(); auto const ngram_offset = d_ngram_offsets[idx]; auto const ngram_count = d_ngram_offsets[idx + 1] - ngram_offset; auto d_sizes = d_offsets + ngram_offset; auto out_ptr = d_chars ? d_chars + *d_sizes : nullptr; for (cudf::size_type n = 0; n < ngram_count; ++n, ++itr) { auto const begin = itr.byte_offset(); auto const end = (itr + ngrams).byte_offset(); if (out_ptr) out_ptr = cudf::strings::detail::copy_and_increment(out_ptr, d_str.data() + begin, (end - begin)); else *d_sizes++ = end - begin; } } }; } // namespace std::unique_ptr<cudf::column> generate_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, hipStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(ngrams > 1, "Parameter ngrams should be an integer value of 2 or greater"); auto const strings_count = strings.size(); if (strings_count == 0) // if no strings, return an empty column return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); auto const execpol = rmm::exec_policy(stream); auto const strings_column = cudf::column_device_view::create(strings.parent(), stream); auto const d_strings = *strings_column; // create a vector of ngram offsets for each string rmm::device_vector<cudf::size_type> ngram_offsets(strings_count + 1); thrust::transform_exclusive_scan( execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(strings_count + 1), ngram_offsets.begin(), [d_strings, strings_count, ngrams] __device__(auto idx) { if (d_strings.is_null(idx) || (idx == strings_count)) return 0; auto const length = d_strings.element<cudf::string_view>(idx).length(); return ::max(0, (length + 1 - ngrams)); }, cudf::size_type{0}, thrust::plus<cudf::size_type>()); // total count is the last entry auto const d_ngram_offsets = ngram_offsets.data().get(); cudf::size_type total_ngrams = 0; CUDA_TRY(hipMemcpyAsync(&total_ngrams, d_ngram_offsets + strings_count, sizeof(cudf::size_type), hipMemcpyDeviceToHost, stream)); CUDF_EXPECTS(total_ngrams > 0, "Insufficient number of characters in each string to generate ngrams"); // create output offsets column auto offsets_column = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, total_ngrams + 1, cudf::mask_state::UNALLOCATED, stream, mr); auto d_offsets = offsets_column->mutable_view().data<int32_t>(); // compute the size of each ngram -- output goes in d_offsets character_ngram_generator_fn generator{d_strings, ngrams, d_ngram_offsets, d_offsets}; thrust::for_each_n(execpol->on(stream), 
thrust::make_counting_iterator<cudf::size_type>(0), strings_count, generator); // convert sizes into offsets in-place thrust::exclusive_scan(execpol->on(stream), d_offsets, d_offsets + total_ngrams + 1, d_offsets); // build the chars column auto const chars_bytes = cudf::detail::get_value<int32_t>(offsets_column->view(), total_ngrams, stream); auto chars_column = cudf::strings::detail::create_chars_child_column(total_ngrams, 0, chars_bytes, mr, stream); generator.d_chars = chars_column->mutable_view().data<char>(); // output chars thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count, generator); return cudf::make_strings_column(total_ngrams, std::move(offsets_column), std::move(chars_column), 0, // no nulls in the result rmm::device_buffer{0, stream, mr}, stream, mr); } } // namespace detail std::unique_ptr<cudf::column> generate_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::generate_character_ngrams(strings, ngrams, 0, mr); } } // namespace nvtext
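// [Editor's note] A minimal, self-contained sketch of the counting step used by
// generate_character_ngrams above: each string contributes max(0, length + 1 - ngrams)
// ngrams, and a running exclusive scan over those counts gives per-string output offsets
// (plain host C++ with hypothetical inputs; character count and byte count coincide here
// because the sample strings are ASCII). This is an illustration, not nvtext's API.
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> strings = {"hello", "ab", "", "world!"};  // hypothetical input
  int const ngrams = 3;
  std::vector<int> offsets(strings.size() + 1, 0);
  for (std::size_t i = 0; i < strings.size(); ++i) {
    // number of character ngrams in string i
    int const count = std::max(0, static_cast<int>(strings[i].size()) + 1 - ngrams);
    offsets[i + 1] = offsets[i] + count;  // running exclusive scan
  }
  for (std::size_t i = 0; i < strings.size(); ++i)
    std::printf("\"%s\": %d ngrams starting at offset %d\n",
                strings[i].c_str(), offsets[i + 1] - offsets[i], offsets[i]);
  std::printf("total ngrams: %d\n", offsets.back());  // must be > 0 in the real code
  return 0;
}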
2f062333195835f4a05a05a95b03255d35d48303.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/copy_if.cuh> #include <cudf/detail/get_value.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/error.hpp> #include <nvtext/generate_ngrams.hpp> #include <strings/utilities.cuh> #include <thrust/transform_scan.h> namespace nvtext { namespace detail { namespace { /** * @brief Generate ngrams from strings column. * * Adjacent strings are concatented with the provided separator. * The number of adjacent strings join depends on the specified ngrams value. * For example: for bigrams (ngrams=2), pairs of strings are concatenated. */ struct ngram_generator_fn { cudf::column_device_view const d_strings; cudf::size_type ngrams; cudf::string_view const d_separator; int32_t const* d_offsets{}; char* d_chars{}; /** * @brief Build ngram for each string. * * This is called for each thread and processed for each string. * Each string will produce the number of ngrams specified. * * @param idx Index of the kernel thread. * @return Number of bytes required for the string for this thread. */ __device__ cudf::size_type operator()(cudf::size_type idx) { char* out_ptr = d_chars ? 
d_chars + d_offsets[idx] : nullptr; cudf::size_type bytes = 0; for (cudf::size_type n = 0; n < ngrams; ++n) { auto const d_str = d_strings.element<cudf::string_view>(n + idx); bytes += d_str.size_bytes(); if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_str); if ((n + 1) >= ngrams) continue; bytes += d_separator.size_bytes(); if (out_ptr) out_ptr = cudf::strings::detail::copy_string(out_ptr, d_separator); } return bytes; } }; } // namespace std::unique_ptr<cudf::column> generate_ngrams( cudf::strings_column_view const& strings, cudf::size_type ngrams = 2, cudf::string_scalar const& separator = cudf::string_scalar{"_"}, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be valid"); cudf::string_view const d_separator(separator.data(), separator.size()); CUDF_EXPECTS(ngrams > 1, "Parameter ngrams should be an integer value of 2 or greater"); auto strings_count = strings.size(); if (strings_count == 0) // if no strings, return an empty column return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); auto execpol = rmm::exec_policy(stream); auto strings_column = cudf::column_device_view::create(strings.parent(), stream); auto d_strings = *strings_column; // first create a new offsets vector removing nulls and empty strings from the input column std::unique_ptr<cudf::column> non_empty_offsets_column = [&] { cudf::column_view offsets_view( cudf::data_type{cudf::type_id::INT32}, strings_count + 1, strings.offsets().data<int32_t>()); auto table_offsets = cudf::detail::copy_if( cudf::table_view({offsets_view}), [d_strings, strings_count] __device__(cudf::size_type idx) { if (idx == strings_count) return true; if (d_strings.is_null(idx)) return false; return !d_strings.element<cudf::string_view>(idx).empty(); }, mr, stream) ->release(); strings_count = table_offsets.front()->size() - 1; return std::move(table_offsets.front()); }(); // this allows freeing the temporary table_offsets CUDF_EXPECTS(strings_count >= ngrams, "Insufficient number of strings to generate ngrams"); // create a temporary column view from the non-empty offsets and chars column views cudf::column_view strings_view(cudf::data_type{cudf::type_id::STRING}, strings_count, nullptr, nullptr, 0, 0, {non_empty_offsets_column->view(), strings.chars()}); strings_column = cudf::column_device_view::create(strings_view, stream); d_strings = *strings_column; // compute the number of strings of ngrams auto const ngrams_count = strings_count - ngrams + 1; // build output offsets by computing the output bytes for each generated ngram auto offsets_transformer_itr = thrust::make_transform_iterator(thrust::make_counting_iterator<cudf::size_type>(0), ngram_generator_fn{d_strings, ngrams, d_separator}); auto offsets_column = cudf::strings::detail::make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + ngrams_count, mr, stream); auto d_offsets = offsets_column->view().data<int32_t>(); // build the chars column // generate the ngrams from the input strings and copy them into the chars data buffer cudf::size_type const total_bytes = thrust::device_pointer_cast(d_offsets)[ngrams_count]; auto chars_column = cudf::strings::detail::create_chars_child_column(ngrams_count, 0, total_bytes, mr, stream); char* const d_chars = chars_column->mutable_view().data<char>(); thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), ngrams_count, 
ngram_generator_fn{d_strings, ngrams, d_separator, d_offsets, d_chars}); chars_column->set_null_count(0); // make the output strings column from the offsets and chars column return cudf::make_strings_column(ngrams_count, std::move(offsets_column), std::move(chars_column), 0, rmm::device_buffer{0, stream, mr}, stream, mr); } } // namespace detail std::unique_ptr<cudf::column> generate_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, cudf::string_scalar const& separator, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::generate_ngrams(strings, ngrams, separator, mr); } namespace detail { namespace { struct character_ngram_generator_fn { cudf::column_device_view const d_strings; cudf::size_type ngrams; int32_t const* d_ngram_offsets{}; int32_t* d_offsets{}; char* d_chars{}; __device__ void operator()(cudf::size_type idx) { if (d_strings.is_null(idx)) return; auto const d_str = d_strings.element<cudf::string_view>(idx); if (d_str.empty()) return; auto itr = d_str.begin(); auto const ngram_offset = d_ngram_offsets[idx]; auto const ngram_count = d_ngram_offsets[idx + 1] - ngram_offset; auto d_sizes = d_offsets + ngram_offset; auto out_ptr = d_chars ? d_chars + *d_sizes : nullptr; for (cudf::size_type n = 0; n < ngram_count; ++n, ++itr) { auto const begin = itr.byte_offset(); auto const end = (itr + ngrams).byte_offset(); if (out_ptr) out_ptr = cudf::strings::detail::copy_and_increment(out_ptr, d_str.data() + begin, (end - begin)); else *d_sizes++ = end - begin; } } }; } // namespace std::unique_ptr<cudf::column> generate_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, cudaStream_t stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(ngrams > 1, "Parameter ngrams should be an integer value of 2 or greater"); auto const strings_count = strings.size(); if (strings_count == 0) // if no strings, return an empty column return cudf::make_empty_column(cudf::data_type{cudf::type_id::STRING}); auto const execpol = rmm::exec_policy(stream); auto const strings_column = cudf::column_device_view::create(strings.parent(), stream); auto const d_strings = *strings_column; // create a vector of ngram offsets for each string rmm::device_vector<cudf::size_type> ngram_offsets(strings_count + 1); thrust::transform_exclusive_scan( execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(strings_count + 1), ngram_offsets.begin(), [d_strings, strings_count, ngrams] __device__(auto idx) { if (d_strings.is_null(idx) || (idx == strings_count)) return 0; auto const length = d_strings.element<cudf::string_view>(idx).length(); return std::max(0, (length + 1 - ngrams)); }, cudf::size_type{0}, thrust::plus<cudf::size_type>()); // total count is the last entry auto const d_ngram_offsets = ngram_offsets.data().get(); cudf::size_type total_ngrams = 0; CUDA_TRY(cudaMemcpyAsync(&total_ngrams, d_ngram_offsets + strings_count, sizeof(cudf::size_type), cudaMemcpyDeviceToHost, stream)); CUDF_EXPECTS(total_ngrams > 0, "Insufficient number of characters in each string to generate ngrams"); // create output offsets column auto offsets_column = cudf::make_numeric_column(cudf::data_type{cudf::type_id::INT32}, total_ngrams + 1, cudf::mask_state::UNALLOCATED, stream, mr); auto d_offsets = offsets_column->mutable_view().data<int32_t>(); // compute the size of each ngram -- output goes in d_offsets character_ngram_generator_fn generator{d_strings, ngrams, d_ngram_offsets, d_offsets}; 
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count, generator); // convert sizes into offsets in-place thrust::exclusive_scan(execpol->on(stream), d_offsets, d_offsets + total_ngrams + 1, d_offsets); // build the chars column auto const chars_bytes = cudf::detail::get_value<int32_t>(offsets_column->view(), total_ngrams, stream); auto chars_column = cudf::strings::detail::create_chars_child_column(total_ngrams, 0, chars_bytes, mr, stream); generator.d_chars = chars_column->mutable_view().data<char>(); // output chars thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count, generator); return cudf::make_strings_column(total_ngrams, std::move(offsets_column), std::move(chars_column), 0, // no nulls in the result rmm::device_buffer{0, stream, mr}, stream, mr); } } // namespace detail std::unique_ptr<cudf::column> generate_character_ngrams(cudf::strings_column_view const& strings, cudf::size_type ngrams, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::generate_character_ngrams(strings, ngrams, 0, mr); } } // namespace nvtext
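// [Editor's note] Both files above size the output first (ngram_generator_fn returns byte
// counts when d_chars is null), scan the sizes into offsets, then run the same functor again
// to write characters at those offsets. A compact host-side sketch of that size-then-write
// idiom for "_"-separated bigrams (plain C++, hypothetical data; not cudf's API):
#include <cstddef>
#include <cstring>
#include <string>
#include <vector>

// Concatenate `ngrams` adjacent strings with '_'; return the byte count.
// When out != nullptr, also write the result there (second pass).
static std::size_t emit(std::vector<std::string> const& in, std::size_t row,
                        std::size_t ngrams, char* out) {
  std::size_t bytes = 0;
  for (std::size_t n = 0; n < ngrams; ++n) {
    std::string const& s = in[row + n];
    if (out) std::memcpy(out + bytes, s.data(), s.size());
    bytes += s.size();
    if (n + 1 < ngrams) {                // separator between strings, not after the last
      if (out) out[bytes] = '_';
      bytes += 1;
    }
  }
  return bytes;
}

int main() {
  std::vector<std::string> in = {"a", "bb", "ccc"};  // hypothetical input strings
  std::size_t const ngrams = 2;
  std::size_t const rows = in.size() - ngrams + 1;   // number of output ngrams
  std::vector<std::size_t> offsets(rows + 1, 0);
  for (std::size_t i = 0; i < rows; ++i)             // pass 1: sizes -> offsets
    offsets[i + 1] = offsets[i] + emit(in, i, ngrams, nullptr);
  std::vector<char> chars(offsets.back());
  for (std::size_t i = 0; i < rows; ++i)             // pass 2: write at scanned offsets
    emit(in, i, ngrams, chars.data() + offsets[i]);
  return 0;
}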
81ff1758fc2bded09bbeb06ba1e5e3a1e44e8e14.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "__pairmult2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int nrows = 1; int bncols = 1; int brows1 = 1; int brows2 = 1; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int lda = 1; float *A2 = NULL; hipMalloc(&A2, XSIZE*YSIZE); int lda2 = 1; float *Bdata = NULL; hipMalloc(&Bdata, XSIZE*YSIZE); int *Bir = NULL; hipMalloc(&Bir, XSIZE*YSIZE); int *Bjc = NULL; hipMalloc(&Bjc, XSIZE*YSIZE); int broff = 1; int bcoff = 1; float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int ldc = 1; int transpose = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( __pairmult2), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,bncols,brows1,brows2,A,lda,A2,lda2,Bdata,Bir,Bjc,broff,bcoff,C,ldc,transpose); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( __pairmult2), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,bncols,brows1,brows2,A,lda,A2,lda2,Bdata,Bir,Bjc,broff,bcoff,C,ldc,transpose); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( __pairmult2), dim3(gridBlock),dim3(threadBlock), 0, 0, nrows,bncols,brows1,brows2,A,lda,A2,lda2,Bdata,Bir,Bjc,broff,bcoff,C,ldc,transpose); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
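// [Editor's note] Each hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), shmemBytes, stream,
// args...) call in the hipified file above carries the same four launch parameters as the
// kernel<<<grid, block, shmemBytes, stream>>>(args...) form in the CUDA original that follows.
// A minimal CUDA sketch of that launch shape with a hypothetical kernel (not __pairmult2):
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* x, int n, float a) {
  int const i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  int const n = 1 << 10;
  float* d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));
  dim3 const block(256);
  dim3 const grid((n + block.x - 1) / block.x);
  // grid, block, dynamic shared memory bytes (0) and stream (default, 0):
  // the same four values hipLaunchKernelGGL lists before the kernel arguments.
  scale<<<grid, block, 0, 0>>>(d_x, n, 2.0f);
  cudaDeviceSynchronize();
  cudaFree(d_x);
  return 0;
}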
81ff1758fc2bded09bbeb06ba1e5e3a1e44e8e14.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "__pairmult2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int nrows = 1; int bncols = 1; int brows1 = 1; int brows2 = 1; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int lda = 1; float *A2 = NULL; cudaMalloc(&A2, XSIZE*YSIZE); int lda2 = 1; float *Bdata = NULL; cudaMalloc(&Bdata, XSIZE*YSIZE); int *Bir = NULL; cudaMalloc(&Bir, XSIZE*YSIZE); int *Bjc = NULL; cudaMalloc(&Bjc, XSIZE*YSIZE); int broff = 1; int bcoff = 1; float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int ldc = 1; int transpose = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); __pairmult2<<<gridBlock,threadBlock>>>(nrows,bncols,brows1,brows2,A,lda,A2,lda2,Bdata,Bir,Bjc,broff,bcoff,C,ldc,transpose); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { __pairmult2<<<gridBlock,threadBlock>>>(nrows,bncols,brows1,brows2,A,lda,A2,lda2,Bdata,Bir,Bjc,broff,bcoff,C,ldc,transpose); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { __pairmult2<<<gridBlock,threadBlock>>>(nrows,bncols,brows1,brows2,A,lda,A2,lda2,Bdata,Bir,Bjc,broff,bcoff,C,ldc,transpose); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
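// [Editor's note] The benchmark above warms up with 10 launches and then times 1000 launches
// with std::chrono::steady_clock; since there is no synchronization around the timed loop,
// part of what it records may be launch/enqueue overhead rather than kernel execution time.
// A hedged alternative sketch using CUDA events, with a hypothetical kernel (not __pairmult2):
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy(float* x, int n) {
  int const i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}

int main() {
  int const n = 1 << 20;
  int const iters = 1000;
  float* d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));
  dim3 const block(256);
  dim3 const grid((n + block.x - 1) / block.x);

  for (int i = 0; i < 10; ++i) dummy<<<grid, block>>>(d_x, n);  // warm-up
  cudaDeviceSynchronize();

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; ++i) dummy<<<grid, block>>>(d_x, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // wait until the last timed launch has finished on the GPU

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  std::printf("total %.3f ms, %.3f us per launch\n", ms, 1000.0f * ms / iters);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}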
GpuConv2D.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #include <stdio.h> #include <sstream> #include <assert.h> #include <hip/hip_runtime.h> #include "core/pack/Pack.h" #include "core/pack/Load.h" #include "core/pack/Call.h" #include "core/pack/GetInds.h" #include "core/pack/GetDims.h" #include "core/utils/CudaErrorCheck.cu" #include "core/utils/CudaSizes.h" #include "core/utils/TypesUtils.h" namespace keops { template <typename T> __device__ static constexpr T static_max_device(T a, T b) { return a < b ? b : a; } template <typename TYPE, int DIMIN, int DIMOUT, class FUN> __global__ void reduce2D(TYPE *in, TYPE *out, int sizeY,int nx) { /* Function used as a final reduction pass in the 2D scheme, * once the block reductions have been made. * Takes as input: * - in, a sizeY * (nx * DIMIN ) array * - out, an nx * DIMOUT array * * Computes, in parallel, the "columnwise"-reduction (which correspond to lines of blocks) * of *in and stores the result in out. */ int tid = blockIdx.x * blockDim.x + threadIdx.x; /* As shown below, the code that is used to store the block-wise sum "tmp" in parallel is: if(i<nx) for(int k=0; k<DIMX1; k++) (*px)[blockIdx.y*DIMX1*nx+i*DIMX1+k] = tmp[k]; */ /* // This code should be a bit more efficient (more parallel) in the case // of a simple "fully parallel" reduction op such as "sum", "max" or "min" TYPE res = 0; if(tid < nx*DIMVECT) { for (int i = 0; i < sizeY; i++) res += in[tid + i*nx*DIMVECT]; // We use "+=" as a reduction op. But it could be anything, really! // res = in[tid+ nx* DIMVECT]; out[tid] = res; } */ // However, for now, we use a "vectorized" reduction op., // which can also handle non-trivial reductions such as "LogSumExp" __TYPEACC__ acc[DIMIN]; typename FUN::template InitializeReduction<__TYPEACC__,TYPE>()(acc); // acc = 0 if(tid < nx) { for (int y = 0; y < sizeY; y++) typename FUN::template ReducePair<__TYPEACC__,TYPE>()(acc, in + (tid+y*nx)*DIMIN); // acc += in[(tid+y*nx) *DIMVECT : +DIMVECT]; typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, out+tid*DIMOUT, tid); } } // thread kernel: computation of x1i = sum_j k(x2i,x3i,...,y1j,y2j,...) for index i given by thread id. // N.B.: This routine by itself is generic, and does not specifically refer to the "sum" operation. // It can be used for any Map-Reduce operation, provided that "fun" is well-understood. template < typename TYPE, class FUN > __global__ void GpuConv2DOnDevice(FUN fun, int nx, int ny, TYPE *out, TYPE **args) { /* * px, py and pp are pointers to the device global memory. * They are arrays of arrays with the relevant size: for instance, * px[1] is a TYPE array of size ( nx * DIMSX::VAL(1) ). * * out is the output array, of size (nx * DIMRED). 
* */ // gets dimensions and number of variables of inputs of function FUN using DIMSX = typename FUN::DIMSX; // DIMSX is a "vector" of templates giving dimensions of xi variables using DIMSY = typename FUN::DIMSY; // DIMSY is a "vector" of templates giving dimensions of yj variables using DIMSP = typename FUN::DIMSP; // DIMSP is a "vector" of templates giving dimensions of parameters variables typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMX = DIMSX::SUM; // DIMX is sum of dimensions for xi variables const int DIMY = DIMSY::SUM; // DIMY is sum of dimensions for yj variables const int DIMP = DIMSP::SUM; // DIMP is sum of dimensions for parameters variables const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int DIMFOUT = FUN::F::DIM; // DIMFOUT is dimension of output variable of inner function TYPE fout[DIMFOUT]; // Load the parameter vector in the Thread Memory, for improved efficiency //TYPE param_loc[static_max_device(DIMP,1)]; // (Jean :) Direct inlining to compile on Ubuntu 16.04 with nvcc7.5, // which is a standard config in research. For whatever reason, I can't make // it work an other way... Is it bad practice/performance? TYPE param_loc[DIMP < 1 ? 1 : DIMP]; load<DIMSP, INDSP>(0,param_loc,args); // load parameters variables from global memory to local thread memory // Weird syntax to create a pointer in shared memory. extern __shared__ char yj_char[]; TYPE* const yj = reinterpret_cast<TYPE*>(yj_char); // Step 1 : Load in Thread Memory the information needed in the current line --------------------------- int i = blockIdx.x * blockDim.x + threadIdx.x; TYPE xi[DIMX < 1 ? 1 : DIMX]; #if SUM_SCHEME == BLOCK_SUM // N.B. To be consistent with the convention used in GpuConv1D, when SUM_SCHEME == BLOCK_SUM=1 we accumulate results in TYPE // instead of __TYPEACC__ in each block, __TYPEACC__ will be used only to sum up results from each block TYPE acc[DIMRED]; #else __TYPEACC__ acc[DIMRED]; #endif #if SUM_SCHEME == KAHAN_SCHEME const int DIM_KAHAN = FUN::template KahanScheme<__TYPEACC__,TYPE>::DIMACC; TYPE tmp[DIM_KAHAN]; #endif if(i<nx) { // we will compute outi only if i is in the range #if SUM_SCHEME == BLOCK_SUM typename FUN::template InitializeReduction<TYPE,TYPE>()(acc); // acc = 0 #else typename FUN::template InitializeReduction<__TYPEACC__,TYPE>()(acc); // acc = 0 #endif #if SUM_SCHEME == KAHAN_SCHEME VectAssign<DIM_KAHAN>(tmp,0.0f); #endif // Load xi from device global memory. // Remember that we use an interleaved memory scheme where // xi = [ x1i, x2i, x3i, ... ]. load< DIMSX, INDSI>(i,xi,args); // load xi variables from global memory to local thread memory } // Step 2 : Load in Shared Memory the information needed in the current block of the product ----------- // In the 1D scheme, we use a loop to run through the line. // In the 2D scheme presented here, the computation is done in parallel wrt both lines and columns. // Hence, we use "blockId.y" to get our current column number. int j = blockIdx.y * blockDim.x + threadIdx.x; // Same blockDim in x and y : squared tiles. if(j<ny) // we load yj from device global memory only if j<ny load<DIMSY,INDSJ>(j,yj+threadIdx.x*DIMY,args); // load yj variables from global memory to shared memory // More precisely : the j-th line of py is loaded to yj, at a location which depends on the // current threadId. 
__syncthreads(); // Make sure nobody lags behind // Step 3 : Once the data is loaded, execute fun -------------------------------------------------------- // N.B.: There's no explicit summation here. Just calls to fun, which *accumulates* the results // along the line, but does not *have* to use a "+=" as reduction operator. // In the future, we could provide other reductions: max, min, ... whatever's needed. if(i<nx) { // we compute x1i only if needed TYPE* yjrel = yj; // Loop on the columns of the current block. for(int jrel = 0; (jrel<blockDim.x) && ((blockDim.x*blockIdx.y+jrel)< ny); jrel++, yjrel+=DIMY) { call<DIMSX,DIMSY,DIMSP>(fun,fout,xi,yjrel,param_loc); // Call the function, which outputs results in fout #if SUM_SCHEME == BLOCK_SUM #if USE_HALF int ind = blockDim.x*blockIdx.y+jrel; typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout #else typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, blockDim.x*blockIdx.y+jrel); // acc += fout #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, fout, tmp); #else #if USE_HALF int ind = blockDim.x*blockIdx.y+jrel; typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout #else typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, blockDim.x*blockIdx.y+jrel); // acc += fout #endif #endif } } __syncthreads(); // Step 4 : Save the result in global memory ----------------------------------------------------------- // The current thread has computed the "linewise-sum" of a small block of the full Kernel Product // matrix, which corresponds to KP[ blockIdx.x * blockDim.x : (blockIdx.x+1) * blockDim.x , // blockIdx.y * blockDim.x : (blockIdx.y+1) * blockDim.x ] // We accumulate it in the output array out, which has in fact gridSize.y * nx // lines of size DIMRED. The final reduction, which "sums over the block lines", // shall be done in a later step. if(i<nx) for(int k=0; k<DIMRED; k++) out[blockIdx.y*DIMRED*nx+i*DIMRED+k] = acc[k]; } /////////////////////////////////////////////////// struct GpuConv2D_FromHost { template < typename TYPE, class FUN > static int Eval_(FUN fun, int nx, int ny, TYPE *out, TYPE **args_h) { using DIMSX = typename FUN::DIMSX; using DIMSY = typename FUN::DIMSY; using DIMSP = typename FUN::DIMSP; typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMX = DIMSX::SUM; const int DIMY = DIMSY::SUM; const int DIMP = DIMSP::SUM; const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; static const int NMINARGS = FUN::NMINARGS; // Compute on device : grid is 2d and block is 1d int dev = -1; CudaSafeCall(hipGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::std::min(CUDA_BLOCK_SIZE,::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int)(DIMY*sizeof(TYPE))) ))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1); gridSize.y = ny / blockSize.x + (ny%blockSize.x==0 ?
0 : 1); // Reduce : grid and block are both 1d dim3 blockSize2; blockSize2.x = CUDA_BLOCK_SIZE; // number of threads in each block dim3 gridSize2; gridSize2.x = (nx*DIMRED) / blockSize2.x + ((nx*DIMRED)%blockSize2.x==0 ? 0 : 1); // Data on the device. We need an "inflated" outB, which contains gridSize.y "copies" of out // that will be reduced in the final pass. TYPE *outB, *out_d; // device array of pointers to device data TYPE **args_d; // single hipMalloc void *p_data; CudaSafeCall(hipMalloc(&p_data, sizeof(TYPE*)*NMINARGS+sizeof(TYPE)*(DIMP+nx*(DIMX+DIMOUT)+ny*DIMY+nx*DIMRED*gridSize.y))); args_d = (TYPE **) p_data; TYPE *dataloc = (TYPE *) (args_d + NMINARGS); out_d = dataloc; dataloc += nx*DIMOUT; // host array of pointers to device data TYPE *ph[NMINARGS]; for (int k = 0; k < SIZEP; k++) { int indk = INDSP::VAL(k); int nvals = DIMSP::VAL(k); CudaSafeCall(hipMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } for (int k = 0; k < SIZEI; k++) { int indk = INDSI::VAL(k); int nvals = nx * DIMSX::VAL(k); CudaSafeCall(hipMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } for (int k = 0; k < SIZEJ; k++) { int indk = INDSJ::VAL(k); int nvals = ny * DIMSY::VAL(k); CudaSafeCall(hipMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, hipMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } outB = dataloc; // we write the result before reduction in the "inflated" vector // copy arrays of pointers CudaSafeCall(hipMemcpy(args_d, ph, NMINARGS * sizeof(TYPE *), hipMemcpyHostToDevice)); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) hipLaunchKernelGGL(( GpuConv2DOnDevice<TYPE>), dim3(gridSize),dim3(blockSize),blockSize.x*(DIMY)*sizeof(TYPE), 0, fun,nx,ny,outB,args_d); // block until the device has completed CudaSafeCall(hipDeviceSynchronize()); CudaCheckError(); // Since we've used a 2D scheme, there's still a "blockwise" line reduction to make on // the output array outB. We go from shape ( gridSize.y * nx, DIMRED ) to (nx, DIMOUT) hipLaunchKernelGGL(( reduce2D<TYPE,DIMRED,DIMOUT,FUN>), dim3(gridSize2), dim3(blockSize2), 0, 0, outB, out_d, gridSize.y,nx); // block until the device has completed CudaSafeCall(hipDeviceSynchronize()); CudaCheckError(); // Send data from device to host. CudaSafeCall(hipMemcpy(out, out_d, sizeof(TYPE)*(nx*DIMOUT),hipMemcpyDeviceToHost)); // Free memory. CudaSafeCall(hipFree(p_data)); return 0; } // Wrapper around GpuConv2D, which takes lists of arrays *x1, *x2, ..., *y1, *y2, ... // and use getlist to enroll them into "pointers arrays" px and py. template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *out, Args... args) { // We set the GPU device on which computations will be performed if(device_id!=-1) CudaSafeCall(hipSetDevice(device_id)); static const int Nargs = sizeof...(Args); TYPE *pargs[Nargs]; unpack(pargs, args...); return Eval_(fun,nx,ny,out,pargs); } // same without the device_id argument template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, TYPE *out, Args... 
args) { return Eval(fun, nx, ny, -1, out, args...); } // Idem, but with args given as an array of arrays, instead of an explicit list of arrays template < typename TYPE, class FUN > static int Eval(FUN fun, int nx, int ny, TYPE *out, TYPE **pargs, int device_id=-1) { // We set the GPU device on which computations will be performed if(device_id!=-1) CudaSafeCall(hipSetDevice(device_id)); return Eval_(fun,nx,ny,out,pargs); } }; struct GpuConv2D_FromDevice { template < typename TYPE, class FUN > static int Eval_(FUN fun, int nx, int ny, TYPE *out, TYPE **args) { static const int DIMRED = FUN::DIMRED; static const int DIMOUT = FUN::DIM; static const int NMINARGS = FUN::NMINARGS; // Data on the device. We need an "inflated" outB, which contains gridSize.y "copies" of out // that will be reduced in the final pass. TYPE *outB; // device array of pointers to device data TYPE **args_d; // Compute on device : grid is 2d and block is 1d int dev = -1; CudaSafeCall(hipGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; typedef typename FUN::DIMSY DIMSY; const int DIMY = DIMSY::SUM; // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::std::min(CUDA_BLOCK_SIZE,::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int)(DIMY*sizeof(TYPE))) ))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1); gridSize.y = ny / blockSize.x + (ny%blockSize.x==0 ? 0 : 1); // Reduce : grid and block are both 1d dim3 blockSize2; blockSize2.x = blockSize.x; // number of threads in each block dim3 gridSize2; gridSize2.x = (nx*DIMRED) / blockSize2.x + ((nx*DIMRED)%blockSize2.x==0 ? 0 : 1); // single hipMalloc void *p_data; CudaSafeCall(hipMalloc(&p_data, sizeof(TYPE*)*NMINARGS + sizeof(TYPE)*(nx*DIMRED*gridSize.y))); args_d = (TYPE **) p_data; CudaSafeCall(hipMemcpy(args_d, args, NMINARGS * sizeof(TYPE *), hipMemcpyHostToDevice)); outB = (TYPE *) (args_d + NMINARGS); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) hipLaunchKernelGGL(( GpuConv2DOnDevice<TYPE>), dim3(gridSize),dim3(blockSize),blockSize.x*(DIMY)*sizeof(TYPE), 0, fun,nx,ny,outB,args_d); // block until the device has completed CudaSafeCall(hipDeviceSynchronize()); CudaCheckError(); // Since we've used a 2D scheme, there's still a "blockwise" line reduction to make on // the output array px_d[0] = x1B. We go from shape ( gridSize.y * nx, DIMRED ) to (nx, DIMOUT) hipLaunchKernelGGL(( reduce2D<TYPE,DIMRED,DIMOUT,FUN>), dim3(gridSize2), dim3(blockSize2), 0, 0, outB, out, gridSize.y,nx); // block until the device has completed CudaSafeCall(hipDeviceSynchronize()); CudaCheckError(); CudaSafeCall(hipFree(p_data)); return 0; } // Same wrappers, but for data located on the device template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *out, Args... args) { // device_id is provided, so we set the GPU device accordingly // Warning : it has to be consistent with location of data CudaSafeCall(hipSetDevice(device_id)); static const int Nargs = sizeof...(Args); TYPE *pargs[Nargs]; unpack(pargs, args...); return Eval_(fun,nx,ny,out,pargs); } // same without the device_id argument template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, TYPE *out, Args... args) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB.
we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : hipPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (first function above) hipPointerAttribute_t attributes; CudaSafeCall(hipPointerGetAttributes(&attributes,out)); return Eval(fun, nx, ny, attributes.device, out, args...); } template < typename TYPE, class FUN > static int Eval(FUN fun, int nx, int ny, TYPE *out, TYPE **pargs, int device_id=-1) { if(device_id==-1) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : hipPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (else statement below) hipPointerAttribute_t attributes; CudaSafeCall(hipPointerGetAttributes(&attributes,out)); CudaSafeCall(hipSetDevice(attributes.device)); } else // device_id is provided, so we use it. Warning : is has to be consistent with location of data CudaSafeCall(hipSetDevice(device_id)); return Eval_(fun,nx,ny,out,pargs); } }; }
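// [Editor's note] A simplified, self-contained CUDA sketch of the two-kernel shape used above:
// a 2D grid where each block column (blockIdx.y) accumulates a partial result per output row
// into an "inflated" buffer, followed by a columnwise reduce2D-style pass. The pairwise term
// is a stand-in (x[i]*y[j], scalar, DIMRED = 1) and the shared-memory staging of yj is omitted;
// this is not the keops functor machinery.
#include <cuda_runtime.h>

// Pass 1: each (blockIdx.x, blockIdx.y) tile accumulates over its slice of j
// and writes one partial value per output row i into partial[blockIdx.y * nx + i].
__global__ void tile_partial(const float* x, const float* y, float* partial, int nx, int ny) {
  int const i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nx) return;
  float acc = 0.0f;
  int const j0 = blockIdx.y * blockDim.x;
  for (int jrel = 0; jrel < (int)blockDim.x && j0 + jrel < ny; ++jrel)
    acc += x[i] * y[j0 + jrel];            // stand-in for the generic fun(xi, yj)
  partial[blockIdx.y * nx + i] = acc;
}

// Pass 2: columnwise reduction over the gridDim.y partial rows (the reduce2D step).
__global__ void reduce_rows(const float* partial, float* out, int sizeY, int nx) {
  int const i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= nx) return;
  float acc = 0.0f;
  for (int y = 0; y < sizeY; ++y) acc += partial[y * nx + i];
  out[i] = acc;
}

int main() {
  int const nx = 1000, ny = 2000, bs = 128;
  float *x, *y, *partial, *out;
  cudaMalloc(&x, nx * sizeof(float));
  cudaMalloc(&y, ny * sizeof(float));
  dim3 const block(bs);
  dim3 const grid((nx + bs - 1) / bs, (ny + bs - 1) / bs);
  cudaMalloc(&partial, grid.y * nx * sizeof(float));
  cudaMalloc(&out, nx * sizeof(float));
  cudaMemset(x, 0, nx * sizeof(float));
  cudaMemset(y, 0, ny * sizeof(float));
  tile_partial<<<grid, block>>>(x, y, partial, nx, ny);
  reduce_rows<<<(nx + bs - 1) / bs, bs>>>(partial, out, grid.y, nx);
  cudaDeviceSynchronize();
  cudaFree(x); cudaFree(y); cudaFree(partial); cudaFree(out);
  return 0;
}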
GpuConv2D.cu
#pragma once #include <stdio.h> #include <sstream> #include <assert.h> #include <cuda.h> #include "core/pack/Pack.h" #include "core/pack/Load.h" #include "core/pack/Call.h" #include "core/pack/GetInds.h" #include "core/pack/GetDims.h" #include "core/utils/CudaErrorCheck.cu" #include "core/utils/CudaSizes.h" #include "core/utils/TypesUtils.h" namespace keops { template <typename T> __device__ static constexpr T static_max_device(T a, T b) { return a < b ? b : a; } template <typename TYPE, int DIMIN, int DIMOUT, class FUN> __global__ void reduce2D(TYPE *in, TYPE *out, int sizeY,int nx) { /* Function used as a final reduction pass in the 2D scheme, * once the block reductions have been made. * Takes as input: * - in, a sizeY * (nx * DIMIN ) array * - out, an nx * DIMOUT array * * Computes, in parallel, the "columnwise"-reduction (which correspond to lines of blocks) * of *in and stores the result in out. */ int tid = blockIdx.x * blockDim.x + threadIdx.x; /* As shown below, the code that is used to store the block-wise sum "tmp" in parallel is: if(i<nx) for(int k=0; k<DIMX1; k++) (*px)[blockIdx.y*DIMX1*nx+i*DIMX1+k] = tmp[k]; */ /* // This code should be a bit more efficient (more parallel) in the case // of a simple "fully parallel" reduction op such as "sum", "max" or "min" TYPE res = 0; if(tid < nx*DIMVECT) { for (int i = 0; i < sizeY; i++) res += in[tid + i*nx*DIMVECT]; // We use "+=" as a reduction op. But it could be anything, really! // res = in[tid+ nx* DIMVECT]; out[tid] = res; } */ // However, for now, we use a "vectorized" reduction op., // which can also handle non-trivial reductions such as "LogSumExp" __TYPEACC__ acc[DIMIN]; typename FUN::template InitializeReduction<__TYPEACC__,TYPE>()(acc); // acc = 0 if(tid < nx) { for (int y = 0; y < sizeY; y++) typename FUN::template ReducePair<__TYPEACC__,TYPE>()(acc, in + (tid+y*nx)*DIMIN); // acc += in[(tid+y*nx) *DIMVECT : +DIMVECT]; typename FUN::template FinalizeOutput<__TYPEACC__,TYPE>()(acc, out+tid*DIMOUT, tid); } } // thread kernel: computation of x1i = sum_j k(x2i,x3i,...,y1j,y2j,...) for index i given by thread id. // N.B.: This routine by itself is generic, and does not specifically refer to the "sum" operation. // It can be used for any Map-Reduce operation, provided that "fun" is well-understood. template < typename TYPE, class FUN > __global__ void GpuConv2DOnDevice(FUN fun, int nx, int ny, TYPE *out, TYPE **args) { /* * px, py and pp are pointers to the device global memory. * They are arrays of arrays with the relevant size: for instance, * px[1] is a TYPE array of size ( nx * DIMSX::VAL(1) ). * * out is the output array, of size (nx * DIMRED). 
* */ // gets dimensions and number of variables of inputs of function FUN using DIMSX = typename FUN::DIMSX; // DIMSX is a "vector" of templates giving dimensions of xi variables using DIMSY = typename FUN::DIMSY; // DIMSY is a "vector" of templates giving dimensions of yj variables using DIMSP = typename FUN::DIMSP; // DIMSP is a "vector" of templates giving dimensions of parameters variables typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMX = DIMSX::SUM; // DIMX is sum of dimensions for xi variables const int DIMY = DIMSY::SUM; // DIMY is sum of dimensions for yj variables const int DIMP = DIMSP::SUM; // DIMP is sum of dimensions for parameters variables const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int DIMFOUT = FUN::F::DIM; // DIMFOUT is dimension of output variable of inner function TYPE fout[DIMFOUT]; // Load the parameter vector in the Thread Memory, for improved efficiency //TYPE param_loc[static_max_device(DIMP,1)]; // (Jean :) Direct inlining to compile on Ubuntu 16.04 with nvcc7.5, // which is a standard config in research. For whatever reason, I can't make // it work an other way... Is it bad practice/performance? TYPE param_loc[DIMP < 1 ? 1 : DIMP]; load<DIMSP, INDSP>(0,param_loc,args); // load parameters variables from global memory to local thread memory // Weird syntax to create a pointer in shared memory. extern __shared__ char yj_char[]; TYPE* const yj = reinterpret_cast<TYPE*>(yj_char); // Step 1 : Load in Thread Memory the information needed in the current line --------------------------- int i = blockIdx.x * blockDim.x + threadIdx.x; TYPE xi[DIMX < 1 ? 1 : DIMX]; #if SUM_SCHEME == BLOCK_SUM // N.B. To be consistent with the convention used in GpuConv1D, when SUM_SCHEME == BLOCK_SUM=1 we accumulate results in TYPE // instead of __TYPEACC__ in each block, __TYPEACC__ will be used only to sum up results from each block TYPE acc[DIMRED]; #else __TYPEACC__ acc[DIMRED]; #endif #if SUM_SCHEME == KAHAN_SCHEME const int DIM_KAHAN = FUN::template KahanScheme<__TYPEACC__,TYPE>::DIMACC; TYPE tmp[DIM_KAHAN]; #endif if(i<nx) { // we will compute outi only if i is in the range #if SUM_SCHEME == BLOCK_SUM typename FUN::template InitializeReduction<TYPE,TYPE>()(acc); // acc = 0 #else typename FUN::template InitializeReduction<__TYPEACC__,TYPE>()(acc); // acc = 0 #endif #if SUM_SCHEME == KAHAN_SCHEME VectAssign<DIM_KAHAN>(tmp,0.0f); #endif // Load xi from device global memory. // Remember that we use an interleaved memory scheme where // xi = [ x1i, x2i, x3i, ... ]. load< DIMSX, INDSI>(i,xi,args); // load xi variables from global memory to local thread memory } // Step 2 : Load in Shared Memory the information needed in the current block of the product ----------- // In the 1D scheme, we use a loop to run through the line. // In the 2D scheme presented here, the computation is done in parallel wrt both lines and columns. // Hence, we use "blockId.y" to get our current column number. int j = blockIdx.y * blockDim.x + threadIdx.x; // Same blockDim in x and y : squared tiles. if(j<ny) // we load yj from device global memory only if j<ny load<DIMSY,INDSJ>(j,yj+threadIdx.x*DIMY,args); // load yj variables from global memory to shared memory // More precisely : the j-th line of py is loaded to yj, at a location which depends on the // current threadId. 
__syncthreads(); // Make sure nobody lags behind // Step 3 : Once the data is loaded, execute fun -------------------------------------------------------- // N.B.: There's no explicit summation here. Just calls to fun, which *accumulates* the results // along the line, but does not *have* to use a "+=" as reduction operator. // In the future, we could provide other reductions: max, min, ... whatever's needed. if(i<nx) { // we compute x1i only if needed TYPE* yjrel = yj; // Loop on the columns of the current block. for(int jrel = 0; (jrel<blockDim.x) && ((blockDim.x*blockIdx.y+jrel)< ny); jrel++, yjrel+=DIMY) { call<DIMSX,DIMSY,DIMSP>(fun,fout,xi,yjrel,param_loc); // Call the function, which outputs results in fout #if SUM_SCHEME == BLOCK_SUM #if USE_HALF int ind = blockDim.x*blockIdx.y+jrel; typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout #else typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, blockDim.x*blockIdx.y+jrel); // acc += fout #endif #elif SUM_SCHEME == KAHAN_SCHEME typename FUN::template KahanScheme<__TYPEACC__,TYPE>()(acc, fout, tmp); #else #if USE_HALF int ind = blockDim.x*blockIdx.y+jrel; typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, __floats2half2_rn(2*ind,2*ind+1)); // acc += fout #else typename FUN::template ReducePairShort<TYPE,TYPE>()(acc, fout, blockDim.x*blockIdx.y+jrel); // acc += fout #endif #endif } } __syncthreads(); // Step 4 : Save the result in global memory ----------------------------------------------------------- // The current thread has computed the "linewise-sum" of a small block of the full Kernel Product // matrix, which corresponds to KP[ blockIdx.x * blockDim.x : (blockIdx.x+1) * blockDim.x , // blockIdx.y * blockDim.x : (blockIdx.y+1) * blockDim.x ] // We accumulate it in the output array out, which has in fact gridSize.y * nx // lines of size DIMRED. The final reduction, which "sums over the block lines", // shall be done in a later step. if(i<nx) for(int k=0; k<DIMRED; k++) out[blockIdx.y*DIMRED*nx+i*DIMRED+k] = acc[k]; } /////////////////////////////////////////////////// struct GpuConv2D_FromHost { template < typename TYPE, class FUN > static int Eval_(FUN fun, int nx, int ny, TYPE *out, TYPE **args_h) { using DIMSX = typename FUN::DIMSX; using DIMSY = typename FUN::DIMSY; using DIMSP = typename FUN::DIMSP; typedef typename FUN::INDSI INDSI; typedef typename FUN::INDSJ INDSJ; typedef typename FUN::INDSP INDSP; const int DIMX = DIMSX::SUM; const int DIMY = DIMSY::SUM; const int DIMP = DIMSP::SUM; const int DIMOUT = FUN::DIM; // dimension of output variable const int DIMRED = FUN::DIMRED; // dimension of reduction operation const int SIZEI = DIMSX::SIZE; const int SIZEJ = DIMSY::SIZE; const int SIZEP = DIMSP::SIZE; static const int NMINARGS = FUN::NMINARGS; // Compute on device : grid is 2d and block is 1d int dev = -1; CudaSafeCall(cudaGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::std::min(CUDA_BLOCK_SIZE,::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int)(DIMY*sizeof(TYPE))) ))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1); gridSize.y = ny / blockSize.x + (ny%blockSize.x==0 ? 
0 : 1); // Reduce : grid and block are both 1d dim3 blockSize2; blockSize2.x = CUDA_BLOCK_SIZE; // number of threads in each block dim3 gridSize2; gridSize2.x = (nx*DIMRED) / blockSize2.x + ((nx*DIMRED)%blockSize2.x==0 ? 0 : 1); // Data on the device. We need an "inflated" outB, which contains gridSize.y "copies" of out // that will be reduced in the final pass. TYPE *outB, *out_d; // device array of pointers to device data TYPE **args_d; // single cudaMalloc void *p_data; CudaSafeCall(cudaMalloc(&p_data, sizeof(TYPE*)*NMINARGS+sizeof(TYPE)*(DIMP+nx*(DIMX+DIMOUT)+ny*DIMY+nx*DIMRED*gridSize.y))); args_d = (TYPE **) p_data; TYPE *dataloc = (TYPE *) (args_d + NMINARGS); out_d = dataloc; dataloc += nx*DIMOUT; // host array of pointers to device data TYPE *ph[NMINARGS]; for (int k = 0; k < SIZEP; k++) { int indk = INDSP::VAL(k); int nvals = DIMSP::VAL(k); CudaSafeCall(cudaMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } for (int k = 0; k < SIZEI; k++) { int indk = INDSI::VAL(k); int nvals = nx * DIMSX::VAL(k); CudaSafeCall(cudaMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } for (int k = 0; k < SIZEJ; k++) { int indk = INDSJ::VAL(k); int nvals = ny * DIMSY::VAL(k); CudaSafeCall(cudaMemcpy(dataloc, args_h[indk], sizeof(TYPE) * nvals, cudaMemcpyHostToDevice)); ph[indk] = dataloc; dataloc += nvals; } outB = dataloc; // we write the result before reduction in the "inflated" vector // copy arrays of pointers CudaSafeCall(cudaMemcpy(args_d, ph, NMINARGS * sizeof(TYPE *), cudaMemcpyHostToDevice)); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) GpuConv2DOnDevice<TYPE><<<gridSize,blockSize,blockSize.x*(DIMY)*sizeof(TYPE)>>>(fun,nx,ny,outB,args_d); // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); // Since we've used a 2D scheme, there's still a "blockwise" line reduction to make on // the output array outB. We go from shape ( gridSize.y * nx, DIMRED ) to (nx, DIMOUT) reduce2D<TYPE,DIMRED,DIMOUT,FUN><<<gridSize2, blockSize2>>>(outB, out_d, gridSize.y,nx); // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); // Send data from device to host. CudaSafeCall(cudaMemcpy(out, out_d, sizeof(TYPE)*(nx*DIMOUT),cudaMemcpyDeviceToHost)); // Free memory. CudaSafeCall(cudaFree(p_data)); return 0; } // Wrapper around GpuConv2D, which takes lists of arrays *x1, *x2, ..., *y1, *y2, ... // and use getlist to enroll them into "pointers arrays" px and py. template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *out, Args... args) { // We set the GPU device on which computations will be performed if(device_id!=-1) CudaSafeCall(cudaSetDevice(device_id)); static const int Nargs = sizeof...(Args); TYPE *pargs[Nargs]; unpack(pargs, args...); return Eval_(fun,nx,ny,out,pargs); } // same without the device_id argument template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, TYPE *out, Args... 
args) { return Eval(fun, nx, ny, -1, out, args...); } // Idem, but with args given as an array of arrays, instead of an explicit list of arrays template < typename TYPE, class FUN > static int Eval(FUN fun, int nx, int ny, TYPE *out, TYPE **pargs, int device_id=-1) { // We set the GPU device on which computations will be performed if(device_id!=-1) CudaSafeCall(cudaSetDevice(device_id)); return Eval_(fun,nx,ny,out,pargs); } }; struct GpuConv2D_FromDevice { template < typename TYPE, class FUN > static int Eval_(FUN fun, int nx, int ny, TYPE *out, TYPE **args) { static const int DIMRED = FUN::DIMRED; static const int DIMOUT = FUN::DIM; static const int NMINARGS = FUN::NMINARGS; // Data on the device. We need an "inflated" outB, which contains gridSize.y "copies" of out // that will be reduced in the final pass. TYPE *outB; // device array of pointers to device data TYPE **args_d; // Compute on device : grid is 2d and block is 1d int dev = -1; CudaSafeCall(cudaGetDevice(&dev)); SetGpuProps(dev); dim3 blockSize; typedef typename FUN::DIMSY DIMSY; const int DIMY = DIMSY::SUM; // warning : blockSize.x was previously set to CUDA_BLOCK_SIZE; currently CUDA_BLOCK_SIZE value is used as a bound. blockSize.x = ::std::min(CUDA_BLOCK_SIZE,::std::min(maxThreadsPerBlock, (int) (sharedMemPerBlock / ::std::max(1, (int)(DIMY*sizeof(TYPE))) ))); // number of threads in each block dim3 gridSize; gridSize.x = nx / blockSize.x + (nx%blockSize.x==0 ? 0 : 1); gridSize.y = ny / blockSize.x + (ny%blockSize.x==0 ? 0 : 1); // Reduce : grid and block are both 1d dim3 blockSize2; blockSize2.x = blockSize.x; // number of threads in each block dim3 gridSize2; gridSize2.x = (nx*DIMRED) / blockSize2.x + ((nx*DIMRED)%blockSize2.x==0 ? 0 : 1); // single cudaMalloc void *p_data; CudaSafeCall(cudaMalloc(&p_data, sizeof(TYPE*)*NMINARGS + sizeof(TYPE)*(nx*DIMRED*gridSize.y))); args_d = (TYPE **) p_data; CudaSafeCall(cudaMemcpy(args_d, args, NMINARGS * sizeof(TYPE *), cudaMemcpyHostToDevice)); outB = (TYPE *) (args_d + NMINARGS); // Size of the SharedData : blockSize.x*(DIMY)*sizeof(TYPE) GpuConv2DOnDevice<TYPE><<<gridSize,blockSize,blockSize.x*(DIMY)*sizeof(TYPE)>>>(fun,nx,ny,outB,args_d); // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); // Since we've used a 2D scheme, there's still a "blockwise" line reduction to make on // the output array px_d[0] = x1B. We go from shape ( gridSize.y * nx, DIMRED ) to (nx, DIMOUT) reduce2D<TYPE,DIMRED,DIMOUT,FUN><<<gridSize2, blockSize2>>>(outB, out, gridSize.y,nx); // block until the device has completed CudaSafeCall(cudaDeviceSynchronize()); CudaCheckError(); CudaSafeCall(cudaFree(p_data)); return 0; } // Same wrappers, but for data located on the device template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, int device_id, TYPE *out, Args... args) { // device_id is provided, so we set the GPU device accordingly // Warning : is has to be consistent with location of data CudaSafeCall(cudaSetDevice(device_id)); static const int Nargs = sizeof...(Args); TYPE *pargs[Nargs]; unpack(pargs, args...); return Eval_(fun,nx,ny,out,pargs); } // same without the device_id argument template < typename TYPE, class FUN, typename... Args > static int Eval(FUN fun, int nx, int ny, TYPE *out, Args... args) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. 
we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : cudaPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (first function above) cudaPointerAttributes attributes; CudaSafeCall(cudaPointerGetAttributes(&attributes,out)); return Eval(fun, nx, ny, attributes.device, out, args...); } template < typename TYPE, class FUN > static int Eval(FUN fun, int nx, int ny, TYPE *out, TYPE **pargs, int device_id=-1) { if(device_id==-1) { // We set the GPU device on which computations will be performed // to be the GPU on which data is located. // NB. we only check location of x1_d which is the output vector // so we assume that input data is on the same GPU // note : cudaPointerGetAttributes has a strange behaviour: // it looks like it makes a copy of the vector on the default GPU device (0) !!! // So we prefer to avoid this and provide directly the device_id as input (else statement below) cudaPointerAttributes attributes; CudaSafeCall(cudaPointerGetAttributes(&attributes,out)); CudaSafeCall(cudaSetDevice(attributes.device)); } else // device_id is provided, so we use it. Warning : is has to be consistent with location of data CudaSafeCall(cudaSetDevice(device_id)); return Eval_(fun,nx,ny,out,pargs); } }; }
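// [Editor's note] Both versions above bound blockSize.x by CUDA_BLOCK_SIZE, maxThreadsPerBlock
// and sharedMemPerBlock / (DIMY*sizeof(TYPE)), with the device limits filled in elsewhere by
// SetGpuProps. A hedged sketch of that sizing rule queried directly from the runtime API
// (the 192-thread cap and DIMY = 7 are illustrative values, not the library's constants):
#include <algorithm>
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  int dev = 0;
  cudaGetDevice(&dev);
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, dev);

  int const DIMY = 7;          // illustrative: shared memory per thread column = DIMY * sizeof(float)
  int const kBlockCap = 192;   // illustrative stand-in for CUDA_BLOCK_SIZE
  int const by_shared =
      static_cast<int>(prop.sharedMemPerBlock / std::max<size_t>(1, DIMY * sizeof(float)));
  int const block_x = std::min(kBlockCap, std::min(prop.maxThreadsPerBlock, by_shared));
  std::printf("blockSize.x = %d (maxThreadsPerBlock=%d, sharedMemPerBlock=%zu)\n",
              block_x, prop.maxThreadsPerBlock, prop.sharedMemPerBlock);
  return 0;
}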