Column          Type     Min length   Max length
hip_filename    string   5            84
hip_content     string   79           9.69M
cuda_filename   string   4            83
cuda_content    string   19           9.69M
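The rows that follow pair each hipified source file (hip_filename, hip_content) with its original CUDA counterpart (cuda_filename, cuda_content). As a minimal sketch, rows with this schema could be iterated with the Hugging Face datasets library; the repository id below is a placeholder assumption, not the dataset's actual name.

from datasets import load_dataset

# "example-org/hip-cuda-pairs" is a hypothetical repo id used only for illustration.
ds = load_dataset("example-org/hip-cuda-pairs", split="train")

for row in ds.select(range(2)):  # peek at the first two hip/cuda pairs
    print(row["hip_filename"], "<->", row["cuda_filename"])
    print(len(row["hip_content"]), "chars of HIP vs",
          len(row["cuda_content"]), "chars of CUDA")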
ed461a2f1d4e8bcbd71d48e27d87ce8b54fec69f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void AdaptWinningFractionKernel(int s1, float *winningFraction,
                                           int *winningCount, float bParam,
                                           int maxCells)
{
    int threadId = blockDim.x * blockIdx.y * gridDim.x  // rows preceding current row in grid
                 + blockDim.x * blockIdx.x              // blocks preceding current block
                 + threadIdx.x;
    if (threadId < maxCells) {
        winningFraction[threadId] = winningFraction[threadId]
            + bParam * ((float)(threadId == s1) - winningFraction[threadId]);
        winningCount[threadId] = winningCount[threadId] + (threadId == s1) * 1;
    }
}
ed461a2f1d4e8bcbd71d48e27d87ce8b54fec69f.cu
#include "includes.h" __global__ void AdaptWinningFractionKernel( int s1, float *winningFraction, int *winningCount, float bParam, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells) { winningFraction[threadId] = winningFraction[threadId] + bParam * ((float)(threadId == s1) - winningFraction[threadId]); winningCount[threadId] = winningCount[threadId] + (threadId == s1) * 1; } }
856755741f8fe0293eb4f9ad2367a5c3a5fe78c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/macros.h" namespace k2 { // See documentation in header of what this is supposed to do. // This is similar to the template Append() defined in ops_inl.h, // but with changes largely about adding `data_offsets`, and // subtracting one from the dims of all but the last array. Array1<int32_t> SpliceRowSplits(int32_t num_arrays, const Array1<int32_t> **src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_arrays, 0); ContextPtr &c = src[0]->Context(); // row_splits_vec is the exclusive-sum of the modified dimensions of // the arrays in `src`. `Modified` means: is subtracted from the dims // of all but the last array. std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0; row_splits_vec[0] = sum; // `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a // pointer to the last element in that array. std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) { K2_CHECK_GE(src[i]->Dim(), 1); int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0); sum += dim; row_splits_vec[i + 1] = sum; last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); if (ans_size == 0) return ans; int32_t *ans_data = ans.Data(); Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec); Array1<int32_t> data_offsets(c, num_arrays); // note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of // last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we // don't need that value since we would not drop the last element of the last // array. ExclusiveSumDeref(last_elems_ptrs, &data_offsets); int32_t *data_offsets_data = data_offsets.Data(); if (c->GetDeviceType() == kCpu) { // a simple loop is faster, although the other branches should still work on // CPU. for (int32_t i = 0; i < num_arrays; i++) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t data_offset = data_offsets_data[i]; for (int32_t j = 0; j < this_dim; j++) { ans_data[j] = this_src_data[j] + data_offset; } // notice `this_dim - 1` here, it means we will overwrite the copy of last // element of src[i] when copying elements in src[i+1] in the next // for-loop, it generates the same result with dropping the last element // of src[i] as last-elment-of-src[i] == src[i+1]->Data()[0] (equals 0) + // data_offsets_data[i+1]. 
ans_data += this_dim - 1; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); mgpu::context_t *mgpu_context = GetModernGpuAllocator(c); auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg, int32_t rank) { ans_data[index] = src_ptrs_data[seg][rank] + data_offsets_data[seg]; }; K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, ans_size, row_splits.Data(), row_splits.Dim() - 1, *mgpu_context)); } return ans; } Array1<int32_t> AppendWithOffsets(const Array1<int32_t> &offsets, const Array1<int32_t> **src) { NVTX_RANGE(K2_FUNC); int32_t num_arrays = offsets.Dim(); ContextPtr c = offsets.Context(); std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0; row_splits_vec[0] = sum; for (int32_t i = 0; i < num_arrays; ++i) { int32_t dim = src[i]->Dim(); sum += dim; row_splits_vec[i + 1] = sum; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); if (ans_size == 0) return ans; int32_t *ans_data = ans.Data(); const int32_t *offsets_data = offsets.Data(); if (c->GetDeviceType() == kCpu) { for (int32_t i = 0; i != num_arrays; ++i) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t offset = offsets_data[i]; for (int32_t j = 0; j != this_dim; ++j) { ans_data[j] = this_src_data[j] + offset; } ans_data += this_dim; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; ++i) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); mgpu::context_t *mgpu_context = GetModernGpuAllocator(c); // `index` is idx01, `seg` is idx0, `rank` is idx1, `value_offsets` is just // a cache for `offsets_data`. auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg, int32_t rank, mgpu::tuple<int32_t> value_offsets) { ans_data[index] = src_ptrs_data[seg][rank] + mgpu::get<0>(value_offsets); }; K2_CUDA_SAFE_CALL(mgpu::transform_lbs( lambda_set_ans, ans_size, row_splits.Data(), row_splits.Dim() - 1, mgpu::make_tuple(offsets_data), *mgpu_context)); } return ans; } bool ValidateRowIds(const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_ids.Context(); const int32_t *data = row_ids.Data(); int32_t dim = row_ids.Dim(); if (dim == 0) return true; // will treat this as valid // note `row_ids[0]` may copy memory from device to host if (row_ids[0] < 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_ids, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. 
}); return (*temp)[0] == 0; } bool ValidateRowSplits(const Array1<int32_t> &row_splits, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_splits.Context(); const int32_t *data = row_splits.Data(); int32_t dim = row_splits.Dim(); // must have at least one element and row_splits[0] == 0 if (dim == 0 || row_splits[0] != 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_splits, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }); return (*temp)[0] == 0; } bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits, const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); // Check if their context are compatible or not while getting ContextPtr ctx = GetContext(row_splits, row_ids); int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim(); if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false; if (row_splits[0] != 0 || (num_elems > 0 && row_ids[0] < 0)) return false; if (num_elems != row_splits[num_rows]) return false; const int32_t *row_ids_data = row_ids.Data(), *row_splits_data = row_splits.Data(); Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(ctx->IsCompatible(*temp->Context())); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp_array.Data(); K2_EVAL( ctx, ::max(num_elems, num_rows), lambda_check_row_ids, (int32_t i)->void { // check row_splits bool invalid_splits = (i < num_rows && row_splits_data[i] > row_splits_data[i + 1]); // check row_ids bool invalid_ids = (i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]); if (invalid_splits || invalid_ids) *temp_data = 1; // check if row_splits and row_ids agree with each other if (i < num_elems) { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) *temp_data = 1; } }); return (*temp)[0] == 0; } void RowSplitsToRowIds(const Array1<int32_t> &row_splits, Array1<int32_t> *row_ids) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(row_splits, *row_ids); int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. K2_CHECK(num_elems == 0 || num_rows > 0); K2_CHECK_EQ(num_elems, row_splits[num_rows]); RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data()); } void RowIdsToRowSplits(const Array1<int32_t> &row_ids, Array1<int32_t> *row_splits) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(*row_splits, row_ids); int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. 
K2_CHECK(num_elems == 0 || num_rows > 0); if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]); RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows, row_splits->Data()); } Array1<int32_t> GetCounts(ContextPtr c, const int32_t *src_data, int32_t src_dim, int32_t n) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(n, 0); Array1<int32_t> ans(c, n, 0); // init with 0 int32_t *ans_data = ans.Data(); if (n == 0) { K2_CHECK_EQ(src_dim, 0); return ans; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { for (int32_t i = 0; i < src_dim; ++i) { ++ans_data[src_data[i]]; } } else { K2_CHECK_EQ(d, kCuda); std::size_t temp_storage_bytes = 0; K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); // The first time is to determine temporary // device storage requirements. Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); } return ans; } Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) { NVTX_RANGE(K2_FUNC); return GetCounts(src.Context(), src.Data(), src.Dim(), n); } Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); int32_t src_dim = src.Dim(); const int32_t *src_data = src.Data(); if (src_dim == 0) { return Array1<int32_t>(c, 0); } K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging // note `src[0]` may do a DeviceToHost memory copy int32_t ans_dim = src[0]; Array1<int32_t> ans(c, ans_dim, 0); // init with 0 int32_t *ans_data = ans.Data(); K2_EVAL( c, src_dim, lambda_set_values, (int32_t i)->void { K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i])); if (i + 1 == src_dim || src_data[i + 1] < src_data[i]) ans_data[src_data[i] - 1] = i + 1; }); MonotonicDecreasingUpperBound(ans, &ans); #ifndef NDEBUG K2_EVAL( c, ans_dim, lambda_check_values, (int32_t i) -> void { int32_t j = ans_data[i]; K2_CHECK((j == src_dim || src_data[j] <= i) && (j == 0 || src_data[j-1] > i)); }); #endif return ans; } Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) { ContextPtr &c = src.Context(); int32_t dim = src.Dim(); Array1<int32_t> ans(c, dim); const int32_t *src_data = src.Data(); int32_t *ans_data = ans.Data(); K2_EVAL( c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; }); return ans; } Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) { K2_CHECK_GT(row_splits.Dim(), 0); ContextPtr &c = row_splits.Context(); int32_t num_rows = row_splits.Dim() - 1; Array1<int32_t> sizes(c, num_rows); const int32_t *row_splits_data = row_splits.Data(); int32_t *sizes_data = sizes.Data(); K2_EVAL( c, num_rows, lambda_set_sizes, (int32_t i)->void { sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i]; }); return sizes; } // This is modified from RowSplitsToRowIdsKernel. // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. 
(gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void SizesToMergeMapKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, uint32_t *merge_map) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; #pragma unroll(4) for (; thread_this_row < row_length; thread_this_row += threads_per_row) merge_map[this_row_split + thread_this_row] = uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row); } Array1<uint32_t> SizesToMergeMap(ContextPtr c, const std::vector<int32_t> &sizes) { int32_t num_srcs = sizes.size(); ContextPtr cpu_context = GetCpuContext(); Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1); int32_t *row_splits_cpu_data = row_splits_cpu.Data(); int32_t tot_size = 0; row_splits_cpu_data[0] = 0; for (int32_t i = 0; i != num_srcs; ++i) { tot_size += sizes[i]; row_splits_cpu_data[i + 1] = tot_size; } Array1<uint32_t> ans(c, tot_size); if (tot_size == 0) return ans; uint32_t *ans_data = ans.Data(); if (c->GetDeviceType() == kCpu) { int32_t cur = 0; for (int32_t src = 0; src != num_srcs; ++src) { int32_t begin = cur, // i.e. the previous end. end = row_splits_cpu_data[src + 1]; for (; cur != end; ++cur) { // the 'src' says which source this item came from, and (cur - begin) // is the position within that source. ans_data[cur] = uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs); } } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits = row_splits_cpu.To(c); #if 1 int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_srcs * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL( hipLaunchKernelGGL(( SizesToMergeMapKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_srcs, threads_per_row, row_splits.Data(), tot_size, ans.Data())); #else // Below version can be just faster than the above version when // num_srcs > 5000 and tot_size > 1,000,000 mgpu::context_t *mgpu_context = GetModernGpuAllocator(c); auto lambda_set_ans = [=] __device__(uint32_t index, uint32_t seg, uint32_t rank) { ans_data[index] = seg + rank * static_cast<uint32_t>(num_srcs); }; K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, tot_size, row_splits.Data(), row_splits.Dim() - 1, *mgpu_context)); #endif } return ans; } bool IsPermutation(const Array1<int32_t> &a) { Array1<int32_t> ones(a.Context(), a.Dim(), 1); int32_t *ones_data = ones.Data(); const int32_t *a_data = a.Data(); int32_t dim = a.Dim(); K2_EVAL( a.Context(), a.Dim(), lambda_set_zero, (int32_t i)->void { if (static_cast<uint32_t>(a_data[i]) < static_cast<uint32_t>(dim)) { ones_data[a_data[i]] = 0; } }); return Equal(ones, 0); } void RowSplitsToRowIdsOffset(const Array1<int32_t> &row_splits_part, Array1<int32_t> *row_ids_part) { ContextPtr c = row_splits_part.Context(); Array1<int32_t> row_splits(c, row_splits_part.Dim()); int32_t *row_splits_data = row_splits.Data(); const int32_t *row_splits_part_data = row_splits_part.Data(); K2_EVAL(c, row_splits_part.Dim(), lambda_subtract_offset, (int32_t i) { row_splits_data[i] = row_splits_part_data[i] - 
row_splits_part_data[0]; }); RowSplitsToRowIds(row_splits, row_ids_part); } } // namespace k2
856755741f8fe0293eb4f9ad2367a5c3a5fe78c5.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/macros.h" namespace k2 { // See documentation in header of what this is supposed to do. // This is similar to the template Append() defined in ops_inl.h, // but with changes largely about adding `data_offsets`, and // subtracting one from the dims of all but the last array. Array1<int32_t> SpliceRowSplits(int32_t num_arrays, const Array1<int32_t> **src) { NVTX_RANGE(K2_FUNC); K2_CHECK_GT(num_arrays, 0); ContextPtr &c = src[0]->Context(); // row_splits_vec is the exclusive-sum of the modified dimensions of // the arrays in `src`. `Modified` means: is subtracted from the dims // of all but the last array. std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0; row_splits_vec[0] = sum; // `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a // pointer to the last element in that array. std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) { K2_CHECK_GE(src[i]->Dim(), 1); int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0); sum += dim; row_splits_vec[i + 1] = sum; last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); if (ans_size == 0) return ans; int32_t *ans_data = ans.Data(); Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec); Array1<int32_t> data_offsets(c, num_arrays); // note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of // last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we // don't need that value since we would not drop the last element of the last // array. ExclusiveSumDeref(last_elems_ptrs, &data_offsets); int32_t *data_offsets_data = data_offsets.Data(); if (c->GetDeviceType() == kCpu) { // a simple loop is faster, although the other branches should still work on // CPU. for (int32_t i = 0; i < num_arrays; i++) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t data_offset = data_offsets_data[i]; for (int32_t j = 0; j < this_dim; j++) { ans_data[j] = this_src_data[j] + data_offset; } // notice `this_dim - 1` here, it means we will overwrite the copy of last // element of src[i] when copying elements in src[i+1] in the next // for-loop, it generates the same result with dropping the last element // of src[i] as last-elment-of-src[i] == src[i+1]->Data()[0] (equals 0) + // data_offsets_data[i+1]. 
ans_data += this_dim - 1; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); mgpu::context_t *mgpu_context = GetModernGpuAllocator(c); auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg, int32_t rank) { ans_data[index] = src_ptrs_data[seg][rank] + data_offsets_data[seg]; }; K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, ans_size, row_splits.Data(), row_splits.Dim() - 1, *mgpu_context)); } return ans; } Array1<int32_t> AppendWithOffsets(const Array1<int32_t> &offsets, const Array1<int32_t> **src) { NVTX_RANGE(K2_FUNC); int32_t num_arrays = offsets.Dim(); ContextPtr c = offsets.Context(); std::vector<int32_t> row_splits_vec(num_arrays + 1); int32_t sum = 0; row_splits_vec[0] = sum; for (int32_t i = 0; i < num_arrays; ++i) { int32_t dim = src[i]->Dim(); sum += dim; row_splits_vec[i + 1] = sum; } int32_t ans_size = sum; Array1<int32_t> ans(c, ans_size); if (ans_size == 0) return ans; int32_t *ans_data = ans.Data(); const int32_t *offsets_data = offsets.Data(); if (c->GetDeviceType() == kCpu) { for (int32_t i = 0; i != num_arrays; ++i) { int32_t this_dim = src[i]->Dim(); const int32_t *this_src_data = src[i]->Data(); int32_t offset = offsets_data[i]; for (int32_t j = 0; j != this_dim; ++j) { ans_data[j] = this_src_data[j] + offset; } ans_data += this_dim; } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits(c, row_splits_vec); std::vector<const int32_t *> src_ptrs_vec(num_arrays); for (int32_t i = 0; i < num_arrays; ++i) src_ptrs_vec[i] = src[i]->Data(); Array1<const int32_t *> src_ptrs(c, src_ptrs_vec); const int32_t **src_ptrs_data = src_ptrs.Data(); mgpu::context_t *mgpu_context = GetModernGpuAllocator(c); // `index` is idx01, `seg` is idx0, `rank` is idx1, `value_offsets` is just // a cache for `offsets_data`. auto lambda_set_ans = [=] __device__(int32_t index, int32_t seg, int32_t rank, mgpu::tuple<int32_t> value_offsets) { ans_data[index] = src_ptrs_data[seg][rank] + mgpu::get<0>(value_offsets); }; K2_CUDA_SAFE_CALL(mgpu::transform_lbs( lambda_set_ans, ans_size, row_splits.Data(), row_splits.Dim() - 1, mgpu::make_tuple(offsets_data), *mgpu_context)); } return ans; } bool ValidateRowIds(const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_ids.Context(); const int32_t *data = row_ids.Data(); int32_t dim = row_ids.Dim(); if (dim == 0) return true; // will treat this as valid // note `row_ids[0]` may copy memory from device to host if (row_ids[0] < 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_ids, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. 
}); return (*temp)[0] == 0; } bool ValidateRowSplits(const Array1<int32_t> &row_splits, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &ctx = row_splits.Context(); const int32_t *data = row_splits.Data(); int32_t dim = row_splits.Dim(); // must have at least one element and row_splits[0] == 0 if (dim == 0 || row_splits[0] != 0) return false; Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(IsCompatible(row_splits, *temp)); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp->Data(); // Note: we know that dim >= 1 as we would have returned above if dim == 0. // This will do nothing if (dim-1) == 0 as we have checked the first element. K2_EVAL( ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void { if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad. }); return (*temp)[0] == 0; } bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits, const Array1<int32_t> &row_ids, Array1<int32_t> *temp /*=nullptr*/) { NVTX_RANGE(K2_FUNC); // Check if their context are compatible or not while getting ContextPtr ctx = GetContext(row_splits, row_ids); int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim(); if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false; if (row_splits[0] != 0 || (num_elems > 0 && row_ids[0] < 0)) return false; if (num_elems != row_splits[num_rows]) return false; const int32_t *row_ids_data = row_ids.Data(), *row_splits_data = row_splits.Data(); Array1<int32_t> temp_array; if (temp == nullptr || temp->Dim() == 0) { temp_array = Array1<int32_t>(ctx, 1); } else { K2_CHECK(ctx->IsCompatible(*temp->Context())); temp_array = temp->Range(0, 1); } temp = &temp_array; *temp = 0; int32_t *temp_data = temp_array.Data(); K2_EVAL( ctx, std::max(num_elems, num_rows), lambda_check_row_ids, (int32_t i)->void { // check row_splits bool invalid_splits = (i < num_rows && row_splits_data[i] > row_splits_data[i + 1]); // check row_ids bool invalid_ids = (i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]); if (invalid_splits || invalid_ids) *temp_data = 1; // check if row_splits and row_ids agree with each other if (i < num_elems) { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) *temp_data = 1; } }); return (*temp)[0] == 0; } void RowSplitsToRowIds(const Array1<int32_t> &row_splits, Array1<int32_t> *row_ids) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(row_splits, *row_ids); int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. K2_CHECK(num_elems == 0 || num_rows > 0); K2_CHECK_EQ(num_elems, row_splits[num_rows]); RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data()); } void RowIdsToRowSplits(const Array1<int32_t> &row_ids, Array1<int32_t> *row_splits) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(*row_splits, row_ids); int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1; K2_CHECK_GE(num_rows, 0); // if there are more than zero elems, there must be at least one row. 
K2_CHECK(num_elems == 0 || num_rows > 0); if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]); RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows, row_splits->Data()); } Array1<int32_t> GetCounts(ContextPtr c, const int32_t *src_data, int32_t src_dim, int32_t n) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(n, 0); Array1<int32_t> ans(c, n, 0); // init with 0 int32_t *ans_data = ans.Data(); if (n == 0) { K2_CHECK_EQ(src_dim, 0); return ans; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { for (int32_t i = 0; i < src_dim; ++i) { ++ans_data[src_data[i]]; } } else { K2_CHECK_EQ(d, kCuda); std::size_t temp_storage_bytes = 0; K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); // The first time is to determine temporary // device storage requirements. Array1<int8_t> d_temp_storage(c, temp_storage_bytes); K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven( d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0, n, src_dim, c->GetCudaStream())); } return ans; } Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) { NVTX_RANGE(K2_FUNC); return GetCounts(src.Context(), src.Data(), src.Dim(), n); } Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); int32_t src_dim = src.Dim(); const int32_t *src_data = src.Data(); if (src_dim == 0) { return Array1<int32_t>(c, 0); } K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging // note `src[0]` may do a DeviceToHost memory copy int32_t ans_dim = src[0]; Array1<int32_t> ans(c, ans_dim, 0); // init with 0 int32_t *ans_data = ans.Data(); K2_EVAL( c, src_dim, lambda_set_values, (int32_t i)->void { K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i])); if (i + 1 == src_dim || src_data[i + 1] < src_data[i]) ans_data[src_data[i] - 1] = i + 1; }); MonotonicDecreasingUpperBound(ans, &ans); #ifndef NDEBUG K2_EVAL( c, ans_dim, lambda_check_values, (int32_t i) -> void { int32_t j = ans_data[i]; K2_CHECK((j == src_dim || src_data[j] <= i) && (j == 0 || src_data[j-1] > i)); }); #endif return ans; } Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) { ContextPtr &c = src.Context(); int32_t dim = src.Dim(); Array1<int32_t> ans(c, dim); const int32_t *src_data = src.Data(); int32_t *ans_data = ans.Data(); K2_EVAL( c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; }); return ans; } Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) { K2_CHECK_GT(row_splits.Dim(), 0); ContextPtr &c = row_splits.Context(); int32_t num_rows = row_splits.Dim() - 1; Array1<int32_t> sizes(c, num_rows); const int32_t *row_splits_data = row_splits.Data(); int32_t *sizes_data = sizes.Data(); K2_EVAL( c, num_rows, lambda_set_sizes, (int32_t i)->void { sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i]; }); return sizes; } // This is modified from RowSplitsToRowIdsKernel. // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. 
(gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void SizesToMergeMapKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, uint32_t *merge_map) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; #pragma unroll(4) for (; thread_this_row < row_length; thread_this_row += threads_per_row) merge_map[this_row_split + thread_this_row] = uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row); } Array1<uint32_t> SizesToMergeMap(ContextPtr c, const std::vector<int32_t> &sizes) { int32_t num_srcs = sizes.size(); ContextPtr cpu_context = GetCpuContext(); Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1); int32_t *row_splits_cpu_data = row_splits_cpu.Data(); int32_t tot_size = 0; row_splits_cpu_data[0] = 0; for (int32_t i = 0; i != num_srcs; ++i) { tot_size += sizes[i]; row_splits_cpu_data[i + 1] = tot_size; } Array1<uint32_t> ans(c, tot_size); if (tot_size == 0) return ans; uint32_t *ans_data = ans.Data(); if (c->GetDeviceType() == kCpu) { int32_t cur = 0; for (int32_t src = 0; src != num_srcs; ++src) { int32_t begin = cur, // i.e. the previous end. end = row_splits_cpu_data[src + 1]; for (; cur != end; ++cur) { // the 'src' says which source this item came from, and (cur - begin) // is the position within that source. ans_data[cur] = uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs); } } } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); Array1<int32_t> row_splits = row_splits_cpu.To(c); #if 1 int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_srcs * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL( SizesToMergeMapKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_srcs, threads_per_row, row_splits.Data(), tot_size, ans.Data())); #else // Below version can be just faster than the above version when // num_srcs > 5000 and tot_size > 1,000,000 mgpu::context_t *mgpu_context = GetModernGpuAllocator(c); auto lambda_set_ans = [=] __device__(uint32_t index, uint32_t seg, uint32_t rank) { ans_data[index] = seg + rank * static_cast<uint32_t>(num_srcs); }; K2_CUDA_SAFE_CALL(mgpu::transform_lbs(lambda_set_ans, tot_size, row_splits.Data(), row_splits.Dim() - 1, *mgpu_context)); #endif } return ans; } bool IsPermutation(const Array1<int32_t> &a) { Array1<int32_t> ones(a.Context(), a.Dim(), 1); int32_t *ones_data = ones.Data(); const int32_t *a_data = a.Data(); int32_t dim = a.Dim(); K2_EVAL( a.Context(), a.Dim(), lambda_set_zero, (int32_t i)->void { if (static_cast<uint32_t>(a_data[i]) < static_cast<uint32_t>(dim)) { ones_data[a_data[i]] = 0; } }); return Equal(ones, 0); } void RowSplitsToRowIdsOffset(const Array1<int32_t> &row_splits_part, Array1<int32_t> *row_ids_part) { ContextPtr c = row_splits_part.Context(); Array1<int32_t> row_splits(c, row_splits_part.Dim()); int32_t *row_splits_data = row_splits.Data(); const int32_t *row_splits_part_data = row_splits_part.Data(); K2_EVAL(c, row_splits_part.Dim(), lambda_subtract_offset, (int32_t i) { row_splits_data[i] = row_splits_part_data[i] - row_splits_part_data[0]; }); 
RowSplitsToRowIds(row_splits, row_ids_part); } } // namespace k2
93b5bad370017cf5fd00052fa8fab3fbe43ec7bb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <fstream>

static void HandleError(hipError_t err, const char *file, int line) {
    if (err != hipSuccess) {
        std::cout << hipGetErrorString(err) << " in " << file << " line " << line << std::endl;
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))

#define IMG_WIDTH 2024
#define IMG_HEIGHT 2024
#define SPHERES 10
#define INF 2e10f
#define rnd(x) (x*rand() / (float)RAND_MAX)

class Sphere {
public:
    float r, g, b;
    float radius;
    float x, y, z;
    __device__ float hit(float ox, float oy, float *n) {
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            float dz = sqrtf(radius*radius - dx*dx - dy*dy);
            *n = dz / sqrtf(radius*radius);
            return dz + z;
        }
        return -INF;
    }
};

__constant__ Sphere dev_s[SPHERES];

__global__ void kernel(int *ptr) { //, Sphere *dev_s) {
    // map from threadIdx/blockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    float ox = (x - (float)IMG_WIDTH/2);
    float oy = (y - (float)IMG_HEIGHT/2);
    float r = 0, g = 0, b = 0;
    float maxz = -INF;
    for (int i = 0; i < SPHERES; i++) {
        float n;
        float t = dev_s[i].hit(ox, oy, &n);
        if (t > maxz) {
            float fscale = n;
            r = dev_s[i].r * fscale;
            g = dev_s[i].g * fscale;
            b = dev_s[i].b * fscale;
            maxz = t;  // remember the depth of this hit so nearer spheres override farther ones
        }
    }
    ptr[offset*3 + 0] = (int)(255 * r);
    ptr[offset*3 + 1] = (int)(255 * g);
    ptr[offset*3 + 2] = (int)(255 * b);
}

int main( void )
{
    // Init img on host
    int img_size = IMG_WIDTH*IMG_HEIGHT*3;
    size_t img_size_t = (size_t)IMG_WIDTH*IMG_HEIGHT*3*sizeof(int);
    int *img;
    img = (int*)malloc(img_size_t);
    for (int i = 0; i < img_size; i += 3) { // init empty img
        img[i+0] = 0;
        img[i+1] = 0;
        img[i+2] = 0;
    }

    // Init spheres on host
    Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
    for (int i = 0; i < SPHERES; i++) {
        temp_s[i].r = (float) rnd(1.0f);
        temp_s[i].g = (float) rnd(1.0f);
        temp_s[i].b = (float) rnd(1.0f);
        temp_s[i].x = (float) rnd(1000.0f) - 500;
        temp_s[i].y = (float) rnd(1000.0f) - 500;
        temp_s[i].z = (float) rnd(1000.0f) - 500;
        temp_s[i].radius = (float) rnd(100.0f) + 20;
    }

    hipEvent_t start, stop;
    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );
    HANDLE_ERROR( hipEventRecord( start, 0 ) );

    int *dev_img;
    HANDLE_ERROR(hipMalloc(&dev_img, img_size_t));
    HANDLE_ERROR(hipMemcpyToSymbol( dev_s, temp_s, sizeof(Sphere) * (size_t)SPHERES));
    free(temp_s);

    dim3 grids(IMG_WIDTH/16, IMG_HEIGHT/16);
    dim3 threads(16, 16);
    hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, dev_img);

    HANDLE_ERROR(hipMemcpy( img, dev_img, img_size_t, hipMemcpyDeviceToHost));
    HANDLE_ERROR(hipFree(dev_img));

    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    float elapsedTime;
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ));
    std::cout << "Time to generate: " << elapsedTime << "ms" << std::endl;

    // write img
    std::ofstream ofs;
    ofs.open("img.ppm");
    ofs << "P3\n" << IMG_WIDTH << " " << IMG_HEIGHT << "\n255\n";
    for (int i = 0; i < img_size; i += 3) {
        ofs << img[i+0] << " " << img[i+1] << " " << img[i+2] << "\n";
    }
    ofs.close();

    return 0;
}
93b5bad370017cf5fd00052fa8fab3fbe43ec7bb.cu
#include <cmath>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <fstream>

static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        std::cout << cudaGetErrorString(err) << " in " << file << " line " << line << std::endl;
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))

#define IMG_WIDTH 2024
#define IMG_HEIGHT 2024
#define SPHERES 10
#define INF 2e10f
#define rnd(x) (x*rand() / (float)RAND_MAX)

class Sphere {
public:
    float r, g, b;
    float radius;
    float x, y, z;
    __device__ float hit(float ox, float oy, float *n) {
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            float dz = sqrtf(radius*radius - dx*dx - dy*dy);
            *n = dz / sqrtf(radius*radius);
            return dz + z;
        }
        return -INF;
    }
};

__constant__ Sphere dev_s[SPHERES];

__global__ void kernel(int *ptr) { //, Sphere *dev_s) {
    // map from threadIdx/blockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    float ox = (x - (float)IMG_WIDTH/2);
    float oy = (y - (float)IMG_HEIGHT/2);
    float r = 0, g = 0, b = 0;
    float maxz = -INF;
    for (int i = 0; i < SPHERES; i++) {
        float n;
        float t = dev_s[i].hit(ox, oy, &n);
        if (t > maxz) {
            float fscale = n;
            r = dev_s[i].r * fscale;
            g = dev_s[i].g * fscale;
            b = dev_s[i].b * fscale;
            maxz = t;  // remember the depth of this hit so nearer spheres override farther ones
        }
    }
    ptr[offset*3 + 0] = (int)(255 * r);
    ptr[offset*3 + 1] = (int)(255 * g);
    ptr[offset*3 + 2] = (int)(255 * b);
}

int main( void )
{
    // Init img on host
    int img_size = IMG_WIDTH*IMG_HEIGHT*3;
    size_t img_size_t = (size_t)IMG_WIDTH*IMG_HEIGHT*3*sizeof(int);
    int *img;
    img = (int*)malloc(img_size_t);
    for (int i = 0; i < img_size; i += 3) { // init empty img
        img[i+0] = 0;
        img[i+1] = 0;
        img[i+2] = 0;
    }

    // Init spheres on host
    Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
    for (int i = 0; i < SPHERES; i++) {
        temp_s[i].r = (float) rnd(1.0f);
        temp_s[i].g = (float) rnd(1.0f);
        temp_s[i].b = (float) rnd(1.0f);
        temp_s[i].x = (float) rnd(1000.0f) - 500;
        temp_s[i].y = (float) rnd(1000.0f) - 500;
        temp_s[i].z = (float) rnd(1000.0f) - 500;
        temp_s[i].radius = (float) rnd(100.0f) + 20;
    }

    cudaEvent_t start, stop;
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );

    int *dev_img;
    HANDLE_ERROR(cudaMalloc(&dev_img, img_size_t));
    HANDLE_ERROR(cudaMemcpyToSymbol( dev_s, temp_s, sizeof(Sphere) * (size_t)SPHERES));
    free(temp_s);

    dim3 grids(IMG_WIDTH/16, IMG_HEIGHT/16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(dev_img);

    HANDLE_ERROR(cudaMemcpy( img, dev_img, img_size_t, cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(dev_img));

    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    float elapsedTime;
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ));
    std::cout << "Time to generate: " << elapsedTime << "ms" << std::endl;

    // write img
    std::ofstream ofs;
    ofs.open("img.ppm");
    ofs << "P3\n" << IMG_WIDTH << " " << IMG_HEIGHT << "\n255\n";
    for (int i = 0; i < img_size; i += 3) {
        ofs << img[i+0] << " " << img[i+1] << " " << img[i+2] << "\n";
    }
    ofs.close();

    return 0;
}
a4d1bc188a95a16c0e991c4457ffa534babd7e1a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>

__global__ void grayscale( unsigned char * rgb, unsigned char * g, std::size_t cols, std::size_t rows )
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    auto j = blockIdx.y * blockDim.y + threadIdx.y;
    if( i < cols && j < rows ) {
        g[ j * cols + i ] = ( 307 * rgb[ 3 * ( j * cols + i ) ]
                            + 604 * rgb[ 3 * ( j * cols + i ) + 1 ]
                            + 113 * rgb[ 3 * ( j * cols + i ) + 2 ] ) / 1024;
    }
}

int main()
{
    cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
    auto rgb = m_in.data;
    auto rows = m_in.rows;
    auto cols = m_in.cols;

    std::vector< unsigned char > g( rows * cols );
    cv::Mat m_out( rows, cols, CV_8UC1, g.data() );

    unsigned char * rgb_d;
    unsigned char * g_d;
    hipError_t err;

    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );
    hipEventRecord( start );

    err = hipMalloc( &rgb_d, 3 * rows * cols );
    if( err != hipSuccess ) {
        std::cerr << hipGetErrorString(err) << " error during allocation" << std::endl;
    }
    err = hipMalloc( &g_d, rows * cols );
    if( err != hipSuccess ) {
        std::cerr << hipGetErrorString(err) << " error during allocation" << std::endl;
    }
    err = hipMemcpy( rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice );
    if( err != hipSuccess ) {
        std::cerr << hipGetErrorString(err) << " error during host-to-device copy" << std::endl;
    }

    dim3 t( 32, 32 );
    dim3 b( ( cols - 1 ) / t.x + 1, ( rows - 1 ) / t.y + 1 );
    hipLaunchKernelGGL(( grayscale), dim3(b), dim3(t) , 0, 0, rgb_d, g_d, cols, rows );

    hipDeviceSynchronize();     // wait for the kernel to finish
    err = hipGetLastError();    // retrieve the kernel error code in case of a crash
    if( err != hipSuccess ) {
        std::cerr << hipGetErrorString( err );  // message associated with the error code
    }

    err = hipMemcpy( g.data(), g_d, rows * cols, hipMemcpyDeviceToHost );
    if( err != hipSuccess ) {
        std::cerr << hipGetErrorString(err) << " error during device-to-host copy" << std::endl;
    }

    hipEventRecord( stop );
    hipEventSynchronize( stop );  // make sure the stop event has completed before reading the timer
    float duration = 0.0f;
    hipEventElapsedTime( &duration, start, stop );
    std::cout << "Total time for the gray kernel: " << duration << "ms\n";

    cv::imwrite( "out.jpg", m_out );

    hipFree( rgb_d );
    hipFree( g_d );

    return 0;
}
a4d1bc188a95a16c0e991c4457ffa534babd7e1a.cu
#include <opencv2/opencv.hpp>
#include <vector>

__global__ void grayscale( unsigned char * rgb, unsigned char * g, std::size_t cols, std::size_t rows )
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    auto j = blockIdx.y * blockDim.y + threadIdx.y;
    if( i < cols && j < rows ) {
        g[ j * cols + i ] = ( 307 * rgb[ 3 * ( j * cols + i ) ]
                            + 604 * rgb[ 3 * ( j * cols + i ) + 1 ]
                            + 113 * rgb[ 3 * ( j * cols + i ) + 2 ] ) / 1024;
    }
}

int main()
{
    cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
    auto rgb = m_in.data;
    auto rows = m_in.rows;
    auto cols = m_in.cols;

    std::vector< unsigned char > g( rows * cols );
    cv::Mat m_out( rows, cols, CV_8UC1, g.data() );

    unsigned char * rgb_d;
    unsigned char * g_d;
    cudaError_t err;

    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start );

    err = cudaMalloc( &rgb_d, 3 * rows * cols );
    if( err != cudaSuccess ) {
        std::cerr << cudaGetErrorString(err) << " error during allocation" << std::endl;
    }
    err = cudaMalloc( &g_d, rows * cols );
    if( err != cudaSuccess ) {
        std::cerr << cudaGetErrorString(err) << " error during allocation" << std::endl;
    }
    err = cudaMemcpy( rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice );
    if( err != cudaSuccess ) {
        std::cerr << cudaGetErrorString(err) << " error during host-to-device copy" << std::endl;
    }

    dim3 t( 32, 32 );
    dim3 b( ( cols - 1 ) / t.x + 1, ( rows - 1 ) / t.y + 1 );
    grayscale<<< b, t >>>( rgb_d, g_d, cols, rows );

    cudaDeviceSynchronize();     // wait for the kernel to finish
    err = cudaGetLastError();    // retrieve the kernel error code in case of a crash
    if( err != cudaSuccess ) {
        std::cerr << cudaGetErrorString( err );  // message associated with the error code
    }

    err = cudaMemcpy( g.data(), g_d, rows * cols, cudaMemcpyDeviceToHost );
    if( err != cudaSuccess ) {
        std::cerr << cudaGetErrorString(err) << " error during device-to-host copy" << std::endl;
    }

    cudaEventRecord( stop );
    cudaEventSynchronize( stop );  // make sure the stop event has completed before reading the timer
    float duration = 0.0f;
    cudaEventElapsedTime( &duration, start, stop );
    std::cout << "Total time for the gray kernel: " << duration << "ms\n";

    cv::imwrite( "out.jpg", m_out );

    cudaFree( rgb_d );
    cudaFree( g_d );

    return 0;
}
2ae7f73b0f040071b4fc169002543e6d2f77bb72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "program.h" #include <math.h> #include <stdio.h> void check_cuda_err(hipError_t err) { if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); } __device__ float distance( global_params_t *params, float *point1, float *point2) { int i; float val, accum = 0; for (i = 0; i < params->dims; i++) { // Faster than using pow() val = point2[i] - point1[i]; accum += val * val; } return sqrt(accum); } __device__ int nearest_centroid( global_params_t *params, global_state_t *state, float *point) { int i, min_idx; float min_val, cur_val; // iterate through all centroids and determine which is the nearest point for (i = 0, min_val = -1; i < params->num_centroids; i++) { cur_val = distance(params, point, state->centroids[i]); if (cur_val < min_val || min_val == -1) { min_val = cur_val; min_idx = i; } } return min_idx; } __global__ void kernel( global_params_t params, dataset_t data, global_state_t state, agg_res_t result) { extern __shared__ char shared_start[]; int i, idx, centroid_idx, *update_counts; float *updates; // calculate address for updates and update counts update_counts = (int *)shared_start; updates = (float *)(shared_start + params.num_centroids * sizeof(int)); // zero out update counters if ((idx = threadIdx.x) < params.num_centroids) { update_counts[idx] = 0; for (i = 0; i < params.dims; i++) updates[idx * params.dims + i] = 0; } // synchronize threads __syncthreads(); // get index of datapoint to operate on idx = blockIdx.x * blockDim.x + threadIdx.x; // if datapoint is past end of data return if (idx >= data.num_points) return; // find nearest centroid centroid_idx = nearest_centroid(&params, &state, data.points[idx]); // add to update counts and update values atomically in shared memory atomicAdd(&update_counts[centroid_idx], 1); for (i = 0; i < params.dims; i++) atomicAdd( &updates[centroid_idx * params.dims + i], data.points[idx][i]); // synchronize threads __syncthreads(); // add to global updates if ((idx = threadIdx.x) < params.num_centroids) { atomicAdd(&result.update_counts[idx], update_counts[idx]); for (i = 0; i < params.dims; i++) atomicAdd( &result.centroid_updates[idx][i], updates[idx * params.dims + i]); } } void run_iteration( int block_size, global_params_t *params, dataset_t *data, global_state_t *state, agg_res_t *result) { int blocks; size_t shared_size; // calculate number of blocks to run for the dataset blocks = data->num_points / block_size; if (data->num_points % block_size) blocks++; // calculate shared allocation size for update_counts and updates shared_size = params->num_centroids * sizeof(int); shared_size += params->num_centroids * params->dims * sizeof(float); // run kernel hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(block_size), shared_size, 0, *params, *data, *state, *result); check_cuda_err(hipPeekAtLastError()); hipDeviceSynchronize(); } void setup_dataset(dataset_t *data, global_params_t *params) { int i; check_cuda_err( hipMallocManaged(&data->points, data->num_points * sizeof(float*))); for (i = 0; i < data->num_points; i++) check_cuda_err( hipMallocManaged(&data->points[i], params->dims * sizeof(float))); } void setup_global_state(global_state_t *state, global_params_t *params) { int i; check_cuda_err(hipMallocManaged( &state->centroids, params->num_centroids * sizeof(float*))); for (i = 0; i < params->num_centroids; i++) check_cuda_err(hipMallocManaged( &state->centroids[i], params->dims * sizeof(float))); } void 
setup_aggregation_result(agg_res_t *result, global_params_t *params) { int i; check_cuda_err(hipMallocManaged( &result->centroid_updates, params->num_centroids * sizeof(float*))); for (i = 0; i < params->num_centroids; i++) check_cuda_err(hipMallocManaged( &result->centroid_updates[i], params->dims * sizeof(float))); check_cuda_err(hipMallocManaged( &result->update_counts, params->num_centroids * sizeof(int))); } void free_dataset(dataset_t *data, global_params_t *params) { int i; for (i = 0; i < data->num_points; i++) hipFree(data->points[i]); hipFree(data->points); } void free_global_state(global_state_t *state, global_params_t *params) { int i; for (i = 0; i < params->num_centroids; i++) hipFree(state->centroids[i]); hipFree(state->centroids); } void free_aggregation_result(agg_res_t *result, global_params_t *params) { int i; for (i = 0; i < params->num_centroids; i++) hipFree(result->centroid_updates[i]); hipFree(result->centroid_updates); hipFree(result->update_counts); }
2ae7f73b0f040071b4fc169002543e6d2f77bb72.cu
#include "program.h" #include <math.h> #include <stdio.h> void check_cuda_err(cudaError_t err) { if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); } __device__ float distance( global_params_t *params, float *point1, float *point2) { int i; float val, accum = 0; for (i = 0; i < params->dims; i++) { // Faster than using pow() val = point2[i] - point1[i]; accum += val * val; } return sqrt(accum); } __device__ int nearest_centroid( global_params_t *params, global_state_t *state, float *point) { int i, min_idx; float min_val, cur_val; // iterate through all centroids and determine which is the nearest point for (i = 0, min_val = -1; i < params->num_centroids; i++) { cur_val = distance(params, point, state->centroids[i]); if (cur_val < min_val || min_val == -1) { min_val = cur_val; min_idx = i; } } return min_idx; } __global__ void kernel( global_params_t params, dataset_t data, global_state_t state, agg_res_t result) { extern __shared__ char shared_start[]; int i, idx, centroid_idx, *update_counts; float *updates; // calculate address for updates and update counts update_counts = (int *)shared_start; updates = (float *)(shared_start + params.num_centroids * sizeof(int)); // zero out update counters if ((idx = threadIdx.x) < params.num_centroids) { update_counts[idx] = 0; for (i = 0; i < params.dims; i++) updates[idx * params.dims + i] = 0; } // synchronize threads __syncthreads(); // get index of datapoint to operate on idx = blockIdx.x * blockDim.x + threadIdx.x; // if datapoint is past end of data return if (idx >= data.num_points) return; // find nearest centroid centroid_idx = nearest_centroid(&params, &state, data.points[idx]); // add to update counts and update values atomically in shared memory atomicAdd(&update_counts[centroid_idx], 1); for (i = 0; i < params.dims; i++) atomicAdd( &updates[centroid_idx * params.dims + i], data.points[idx][i]); // synchronize threads __syncthreads(); // add to global updates if ((idx = threadIdx.x) < params.num_centroids) { atomicAdd(&result.update_counts[idx], update_counts[idx]); for (i = 0; i < params.dims; i++) atomicAdd( &result.centroid_updates[idx][i], updates[idx * params.dims + i]); } } void run_iteration( int block_size, global_params_t *params, dataset_t *data, global_state_t *state, agg_res_t *result) { int blocks; size_t shared_size; // calculate number of blocks to run for the dataset blocks = data->num_points / block_size; if (data->num_points % block_size) blocks++; // calculate shared allocation size for update_counts and updates shared_size = params->num_centroids * sizeof(int); shared_size += params->num_centroids * params->dims * sizeof(float); // run kernel kernel<<<blocks, block_size, shared_size>>>( *params, *data, *state, *result); check_cuda_err(cudaPeekAtLastError()); cudaDeviceSynchronize(); } void setup_dataset(dataset_t *data, global_params_t *params) { int i; check_cuda_err( cudaMallocManaged(&data->points, data->num_points * sizeof(float*))); for (i = 0; i < data->num_points; i++) check_cuda_err( cudaMallocManaged(&data->points[i], params->dims * sizeof(float))); } void setup_global_state(global_state_t *state, global_params_t *params) { int i; check_cuda_err(cudaMallocManaged( &state->centroids, params->num_centroids * sizeof(float*))); for (i = 0; i < params->num_centroids; i++) check_cuda_err(cudaMallocManaged( &state->centroids[i], params->dims * sizeof(float))); } void setup_aggregation_result(agg_res_t *result, global_params_t *params) { int i; check_cuda_err(cudaMallocManaged( 
&result->centroid_updates, params->num_centroids * sizeof(float*))); for (i = 0; i < params->num_centroids; i++) check_cuda_err(cudaMallocManaged( &result->centroid_updates[i], params->dims * sizeof(float))); check_cuda_err(cudaMallocManaged( &result->update_counts, params->num_centroids * sizeof(int))); } void free_dataset(dataset_t *data, global_params_t *params) { int i; for (i = 0; i < data->num_points; i++) cudaFree(data->points[i]); cudaFree(data->points); } void free_global_state(global_state_t *state, global_params_t *params) { int i; for (i = 0; i < params->num_centroids; i++) cudaFree(state->centroids[i]); cudaFree(state->centroids); } void free_aggregation_result(agg_res_t *result, global_params_t *params) { int i; for (i = 0; i < params->num_centroids; i++) cudaFree(result->centroid_updates[i]); cudaFree(result->centroid_updates); cudaFree(result->update_counts); }
239c1a8c5c528a714d0b1c4e06bb3809f1a4ab4e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "Graph.h" #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) #define MAX_MEMORY ((long long)4e9) using namespace std; #define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); } float device_time_taken; inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false) { if (error != hipSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, hipGetErrorString(error)); printf("\nIn file :%s\nOn line: %d", file, line); if(abort) exit(-1); } } __global__ void betweennessCentralityKernel(Graph *graph, float *bwCentrality, int nodeCount, int *sigma, int *distance, float *dependency, int *Q, int *Qpointers) { int idx = threadIdx.x; if(idx >= nodeCount) return; __shared__ int s; __shared__ int Q_len; __shared__ int Qpointers_len; __shared__ int noOfBlocks; if(idx == 0) { s = blockIdx.x - gridDim.x; noOfBlocks = gridDim.x; // printf("Progress... %3d%%", 0); } __syncthreads(); while(s < nodeCount - noOfBlocks) { if(idx == 0) { s += noOfBlocks; // printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount); // printf("Node %d\n", s); Q[0 + (blockIdx.x * nodeCount)] = s; Q_len = 1; Qpointers[0 + (blockIdx.x * nodeCount)] = 0; Qpointers[1 + (blockIdx.x * nodeCount)] = 1; Qpointers_len = 1; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(v == s) { distance[v + (blockIdx.x * nodeCount)] = 0; sigma[v + (blockIdx.x * nodeCount)] = 1; } else { distance[v + (blockIdx.x * nodeCount)] = INT_MAX; sigma[v + (blockIdx.x * nodeCount)] = 0; } dependency[v + (blockIdx.x * nodeCount)] = 0.0; } __syncthreads(); // BFS while(true) { __syncthreads(); for(int k=idx; k<Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(atomicCAS(&distance[w + (blockIdx.x * nodeCount)], INT_MAX, distance[v + (blockIdx.x * nodeCount)] +1) == INT_MAX) { int t = atomicAdd(&Q_len, 1); Q[t + (blockIdx.x * nodeCount)] = w; } if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)]+1)) { atomicAdd(&sigma[w + (blockIdx.x * nodeCount)], sigma[v + (blockIdx.x * nodeCount)]); } } } __syncthreads(); if(Q_len == Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]) break; if(idx == 0) { Qpointers_len++; Qpointers[Qpointers_len + (blockIdx.x * nodeCount)] = Q_len; } __syncthreads(); } __syncthreads(); // Reverse BFS while(Qpointers_len > 0) { for(int k=idx; k < Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)] + 1)) { if (sigma[w + (blockIdx.x * nodeCount)] != 0) dependency[v + (blockIdx.x * nodeCount)] += (sigma[v + (blockIdx.x * nodeCount)] * 1.0 / sigma[w + (blockIdx.x * nodeCount)]) * (1 + dependency[w + (blockIdx.x * nodeCount)]); } } if (v != s) { // Each shortest path is counted twice. So, each partial shortest path dependency is halved. 
atomicAdd(bwCentrality + v, dependency[v + (blockIdx.x * nodeCount)] / 2); } } __syncthreads(); if(idx == 0) Qpointers_len--; __syncthreads(); } } } float *betweennessCentrality(Graph *graph, int nodeCount) { float *bwCentrality = new float[nodeCount](); float *device_bwCentrality, *dependency; int *sigma, *distance, *Q, *Qpointers; const int BLOCK_COUNT = MAX_MEMORY / (4 * 5 * nodeCount); catchCudaError(hipMalloc((void **)&device_bwCentrality, sizeof(float) * nodeCount)); catchCudaError(hipMalloc((void **)&sigma, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&distance, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&Q, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&Qpointers, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&dependency, sizeof(float) * nodeCount * BLOCK_COUNT)); catchCudaError(hipMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * nodeCount, hipMemcpyHostToDevice)); // Timer hipEvent_t device_start, device_end; catchCudaError(hipEventCreate(&device_start)); catchCudaError(hipEventCreate(&device_end)); catchCudaError(hipEventRecord(device_start)); hipLaunchKernelGGL(( betweennessCentralityKernel), dim3(BLOCK_COUNT), dim3(MAX_THREAD_COUNT), 0, 0, graph, device_bwCentrality, nodeCount, sigma, distance, dependency, Q, Qpointers); hipDeviceSynchronize(); //End of progress bar cout << endl; // Timer catchCudaError(hipEventRecord(device_end)); catchCudaError(hipEventSynchronize(device_end)); hipEventElapsedTime(&device_time_taken, device_start, device_end); // Copy back and free memory catchCudaError(hipMemcpy(bwCentrality, device_bwCentrality, sizeof(float) * nodeCount, hipMemcpyDeviceToHost)); catchCudaError(hipFree(device_bwCentrality)); catchCudaError(hipFree(sigma)); catchCudaError(hipFree(dependency)); catchCudaError(hipFree(distance)); catchCudaError(hipFree(Q)); catchCudaError(hipFree(Qpointers)); return bwCentrality; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the Graph Betweenness Centrality for all nodes? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph))); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice)); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1))); catchCudaError(hipMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), hipMemcpyHostToDevice)); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1))); catchCudaError(hipMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), hipMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), hipMemcpyHostToDevice)); float *bwCentrality = betweennessCentrality(device_graph, nodeCount); float maxBetweenness = -1; for (int i = 0; i < nodeCount; i++) { maxBetweenness = max(maxBetweenness, bwCentrality[i]); } printf("%f", device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; i++) cout << bwCentrality[i] << " "; cout << endl; } // Free all memory delete[] bwCentrality; catchCudaError(hipFree(adjacencyList)); catchCudaError(hipFree(adjacencyListPointers)); catchCudaError(hipFree(device_graph)); }
239c1a8c5c528a714d0b1c4e06bb3809f1a4ab4e.cu
#include <iostream> #include <cuda.h> #include "Graph.h" #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) #define MAX_MEMORY ((long long)4e9) using namespace std; #define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); } float device_time_taken; inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false) { if (error != cudaSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, cudaGetErrorString(error)); printf("\nIn file :%s\nOn line: %d", file, line); if(abort) exit(-1); } } __global__ void betweennessCentralityKernel(Graph *graph, float *bwCentrality, int nodeCount, int *sigma, int *distance, float *dependency, int *Q, int *Qpointers) { int idx = threadIdx.x; if(idx >= nodeCount) return; __shared__ int s; __shared__ int Q_len; __shared__ int Qpointers_len; __shared__ int noOfBlocks; if(idx == 0) { s = blockIdx.x - gridDim.x; noOfBlocks = gridDim.x; // printf("Progress... %3d%%", 0); } __syncthreads(); while(s < nodeCount - noOfBlocks) { if(idx == 0) { s += noOfBlocks; // printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount); // printf("Node %d\n", s); Q[0 + (blockIdx.x * nodeCount)] = s; Q_len = 1; Qpointers[0 + (blockIdx.x * nodeCount)] = 0; Qpointers[1 + (blockIdx.x * nodeCount)] = 1; Qpointers_len = 1; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(v == s) { distance[v + (blockIdx.x * nodeCount)] = 0; sigma[v + (blockIdx.x * nodeCount)] = 1; } else { distance[v + (blockIdx.x * nodeCount)] = INT_MAX; sigma[v + (blockIdx.x * nodeCount)] = 0; } dependency[v + (blockIdx.x * nodeCount)] = 0.0; } __syncthreads(); // BFS while(true) { __syncthreads(); for(int k=idx; k<Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(atomicCAS(&distance[w + (blockIdx.x * nodeCount)], INT_MAX, distance[v + (blockIdx.x * nodeCount)] +1) == INT_MAX) { int t = atomicAdd(&Q_len, 1); Q[t + (blockIdx.x * nodeCount)] = w; } if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)]+1)) { atomicAdd(&sigma[w + (blockIdx.x * nodeCount)], sigma[v + (blockIdx.x * nodeCount)]); } } } __syncthreads(); if(Q_len == Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]) break; if(idx == 0) { Qpointers_len++; Qpointers[Qpointers_len + (blockIdx.x * nodeCount)] = Q_len; } __syncthreads(); } __syncthreads(); // Reverse BFS while(Qpointers_len > 0) { for(int k=idx; k < Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)] + 1)) { if (sigma[w + (blockIdx.x * nodeCount)] != 0) dependency[v + (blockIdx.x * nodeCount)] += (sigma[v + (blockIdx.x * nodeCount)] * 1.0 / sigma[w + (blockIdx.x * nodeCount)]) * (1 + dependency[w + (blockIdx.x * nodeCount)]); } } if (v != s) { // Each shortest path is counted twice. So, each partial shortest path dependency is halved. 
atomicAdd(bwCentrality + v, dependency[v + (blockIdx.x * nodeCount)] / 2); } } __syncthreads(); if(idx == 0) Qpointers_len--; __syncthreads(); } } } float *betweennessCentrality(Graph *graph, int nodeCount) { float *bwCentrality = new float[nodeCount](); float *device_bwCentrality, *dependency; int *sigma, *distance, *Q, *Qpointers; const int BLOCK_COUNT = MAX_MEMORY / (4 * 5 * nodeCount); catchCudaError(cudaMalloc((void **)&device_bwCentrality, sizeof(float) * nodeCount)); catchCudaError(cudaMalloc((void **)&sigma, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&distance, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&Q, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&Qpointers, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&dependency, sizeof(float) * nodeCount * BLOCK_COUNT)); catchCudaError(cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * nodeCount, cudaMemcpyHostToDevice)); // Timer cudaEvent_t device_start, device_end; catchCudaError(cudaEventCreate(&device_start)); catchCudaError(cudaEventCreate(&device_end)); catchCudaError(cudaEventRecord(device_start)); betweennessCentralityKernel<<<BLOCK_COUNT, MAX_THREAD_COUNT>>>(graph, device_bwCentrality, nodeCount, sigma, distance, dependency, Q, Qpointers); cudaDeviceSynchronize(); //End of progress bar cout << endl; // Timer catchCudaError(cudaEventRecord(device_end)); catchCudaError(cudaEventSynchronize(device_end)); cudaEventElapsedTime(&device_time_taken, device_start, device_end); // Copy back and free memory catchCudaError(cudaMemcpy(bwCentrality, device_bwCentrality, sizeof(float) * nodeCount, cudaMemcpyDeviceToHost)); catchCudaError(cudaFree(device_bwCentrality)); catchCudaError(cudaFree(sigma)); catchCudaError(cudaFree(dependency)); catchCudaError(cudaFree(distance)); catchCudaError(cudaFree(Q)); catchCudaError(cudaFree(Qpointers)); return bwCentrality; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the Graph Betweenness Centrality for all nodes? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph))); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice)); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(cudaMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1))); catchCudaError(cudaMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(cudaMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), cudaMemcpyHostToDevice)); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(cudaMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1))); catchCudaError(cudaMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), cudaMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(cudaMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), cudaMemcpyHostToDevice)); float *bwCentrality = betweennessCentrality(device_graph, nodeCount); float maxBetweenness = -1; for (int i = 0; i < nodeCount; i++) { maxBetweenness = max(maxBetweenness, bwCentrality[i]); } printf("%f", device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; i++) cout << bwCentrality[i] << " "; cout << endl; } // Free all memory delete[] bwCentrality; catchCudaError(cudaFree(adjacencyList)); catchCudaError(cudaFree(adjacencyListPointers)); catchCudaError(cudaFree(device_graph)); }
7d26e3f7b3f41463f75c013a13223e97fccf4421.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************* * Copyright 2011-2012, * Marwan Abdellah: <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation. * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. ********************************************************************/ #ifndef _CU_DIVIDE_1D_ARRAY_DEVICE_IMPL_CU_ #define _CU_DIVIDE_1D_ARRAY_DEVICE_IMPL_CU_ #include "cuGlobals.h" #include "Timers/Boost.h" #include "Kernels/Constant_Divide_1D_Array_Kernel.cu" /*! Implementation for the Constant_Divide_1D_Array_Kernel kernel. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template <typename T> extern void cu_Constant_Divide_1D_Array_Impl (dim3 cuBlock, dim3 cuGrid, T* devArrayInput, T constVal, T* devArrayOutput, int N, cuProfile* profile) { // Create CUDA timer cutCreateTimer(&(profile->kernelTime)); // Reset CUDA timer cutResetTimer(profile->kernelTime); // Start CUDA timer cutStartTimer(profile->kernelTime); // Execute the kernel hipLaunchKernelGGL(( Constant_Divide_1D_Array_Kernel) , dim3(cuGrid), dim3(cuBlock) , 0, 0, devArrayInput, constVal, devArrayOutput, N); // Stop CUDA timer cutStopTimer(profile->kernelTime); // Calculate kernel execution time profile->kernelDuration = cutGetTimerValue(profile->kernelTime); // Check successfull execution of the kernel profile->kernelExecErr = hipPeekAtLastError(); } /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <char> (dim3 cuBlock, dim3 cuGrid, char *devArrayInput, char constVal, char* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. 
* */ template void cu_Constant_Divide_1D_Array_Impl <unsigned char> (dim3 cuBlock, dim3 cuGrid, unsigned char* devArrayInput, unsigned char constVal, unsigned char* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <int> (dim3 cuBlock, dim3 cuGrid, int* devArrayInput, int constVal, int* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <unsigned int> (dim3 cuBlock, dim3 cuGrid, unsigned int* devArrayInput, unsigned int constVal, unsigned int* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <float> (dim3 cuBlock, dim3 cuGrid, float* devArrayInput, float constVal, float* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <double> (dim3 cuBlock, dim3 cuGrid, double* devArrayInput, double constVal, double* devArrayOutput, int N, cuProfile* profile); #endif // _CU_DIVIDE_1D_ARRAY_DEVICE_IMPL_CU_
7d26e3f7b3f41463f75c013a13223e97fccf4421.cu
/********************************************************************* * Copyright © 2011-2012, * Marwan Abdellah: <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation. * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. ********************************************************************/ #ifndef _CU_DIVIDE_1D_ARRAY_DEVICE_IMPL_CU_ #define _CU_DIVIDE_1D_ARRAY_DEVICE_IMPL_CU_ #include "cuGlobals.h" #include "Timers/Boost.h" #include "Kernels/Constant_Divide_1D_Array_Kernel.cu" /*! Implementation for the Constant_Divide_1D_Array_Kernel kernel. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template <typename T> extern void cu_Constant_Divide_1D_Array_Impl (dim3 cuBlock, dim3 cuGrid, T* devArrayInput, T constVal, T* devArrayOutput, int N, cuProfile* profile) { // Create CUDA timer cutCreateTimer(&(profile->kernelTime)); // Reset CUDA timer cutResetTimer(profile->kernelTime); // Start CUDA timer cutStartTimer(profile->kernelTime); // Execute the kernel Constant_Divide_1D_Array_Kernel <<< cuGrid, cuBlock >>> (devArrayInput, constVal, devArrayOutput, N); // Stop CUDA timer cutStopTimer(profile->kernelTime); // Calculate kernel execution time profile->kernelDuration = cutGetTimerValue(profile->kernelTime); // Check successfull execution of the kernel profile->kernelExecErr = cudaPeekAtLastError(); } /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <char> (dim3 cuBlock, dim3 cuGrid, char *devArrayInput, char constVal, char* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <unsigned char> (dim3 cuBlock, dim3 cuGrid, unsigned char* devArrayInput, unsigned char constVal, unsigned char* devArrayOutput, int N, cuProfile* profile); /*! 
Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <int> (dim3 cuBlock, dim3 cuGrid, int* devArrayInput, int constVal, int* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <unsigned int> (dim3 cuBlock, dim3 cuGrid, unsigned int* devArrayInput, unsigned int constVal, unsigned int* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <float> (dim3 cuBlock, dim3 cuGrid, float* devArrayInput, float constVal, float* devArrayOutput, int N, cuProfile* profile); /*! Instantiates cu_Constant_Divide_1D_Array_Impl() with the explicitly specified template for * input vector of type char. * * @param cuBlock * Kernel block configuration. * * @param cuGrid * Kernel grid configuration. * * @param devArrayInput * Input device vector. * * @param constVal * Constant value to divide the input device vector by. * * @param devArrayOutput * Output device vector. * * @param N * Length of the input vector. * * @param profile * GPU profiling structure. * */ template void cu_Constant_Divide_1D_Array_Impl <double> (dim3 cuBlock, dim3 cuGrid, double* devArrayInput, double constVal, double* devArrayOutput, int N, cuProfile* profile); #endif // _CU_DIVIDE_1D_ARRAY_DEVICE_IMPL_CU_
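The host wrapper above only launches Constant_Divide_1D_Array_Kernel; the kernel itself lives in the included Kernels/Constant_Divide_1D_Array_Kernel.cu and is not part of this pair. For orientation, here is a minimal sketch of what an element-wise constant-divide kernel of that shape plausibly looks like (an assumption, not the library's actual kernel):

// Hypothetical element-wise divide kernel (sketch); the real kernel may differ.
template <typename T>
__global__ void Constant_Divide_1D_Array_Kernel_Sketch(T* devArrayInput, T constVal,
                                                       T* devArrayOutput, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        devArrayOutput[idx] = devArrayInput[idx] / constVal;  // assumes constVal != 0
}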
9019d93afba2e39db115e1e6fcfe976b9d53cdf7.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #if THRUST_PATH #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #if TORCH_HIP_VERSION >= 7000 #include <thrust/system/hip/execution_policy.h> #endif #else #include <bolt/amp/functional.h> #include <bolt/amp/inner_product.h> #endif struct smoothl1_functor { __host__ __device__ smoothl1_functor() {} __host__ __device__ float operator()(const float &x, const float &y) const { float z = fabsf(x-y); return z < 1.f ? 0.5f*z*z : z - 0.5f; } __host__ __device__ ~smoothl1_functor() {} }; void THNN_CudaSmoothL1Criterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage) { THCUNN_assertSameGPU(state, 2, input, target); THArgCheck( THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2, "input and target need to have the same number of elements" ); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); #if THRUST_PATH thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par.on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), smoothl1_functor() ); #else auto input_data = THCudaTensor_data(state, input); auto target_data = THCudaTensor_data(state, target); float sum = bolt::amp::inner_product(input_data, input_data+size, target_data, 0.0f, bolt::amp::plus<float>(), smoothl1_functor()); #endif if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct smoothl1_updateGradInput_functor { float norm; __host__ __device__ smoothl1_updateGradInput_functor() = default; __host__ __device__ smoothl1_updateGradInput_functor(float norm_) : norm(norm_) {} smoothl1_updateGradInput_functor(const smoothl1_updateGradInput_functor& fun) = default; __host__ __device__ float operator()(const float &x, const float &y) const { float z = x - y; if (z < -1.f) return -norm; else if (z > 1.f) return norm; else return norm * z; } }; void THNN_CudaSmoothL1Criterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage) { THCUNN_assertSameGPU(state, 3, input, target, gradInput); THArgCheck( THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2, "input and target need to have the same number of elements" ); long size = THCudaTensor_nElement(state, input); float norm = sizeAverage ? 
1./size : 1.; input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); #if THRUST_PATH thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par.on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, gradInput_data, smoothl1_updateGradInput_functor(norm) ); #else auto input_data = THCudaTensor_data(state, input); auto target_data = THCudaTensor_data(state, target); auto gradInput_data = THCudaTensor_data(state, gradInput); bolt::amp::transform(input_data, input_data+size, target_data, gradInput_data, smoothl1_updateGradInput_functor(norm)); #endif THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
9019d93afba2e39db115e1e6fcfe976b9d53cdf7.cu
#include "THCUNN.h" #include "common.h" #if THRUST_PATH #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> #if CUDA_VERSION >= 7000 #include <thrust/system/cuda/execution_policy.h> #endif #else #include <bolt/amp/functional.h> #include <bolt/amp/inner_product.h> #endif struct smoothl1_functor { __host__ __device__ smoothl1_functor() {} __host__ __device__ float operator()(const float &x, const float &y) const { float z = fabsf(x-y); return z < 1.f ? 0.5f*z*z : z - 0.5f; } __host__ __device__ ~smoothl1_functor() {} }; void THNN_CudaSmoothL1Criterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage) { THCUNN_assertSameGPU(state, 2, input, target); THArgCheck( THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2, "input and target need to have the same number of elements" ); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); #if THRUST_PATH thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum = thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par.on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), smoothl1_functor() ); #else auto input_data = THCudaTensor_data(state, input); auto target_data = THCudaTensor_data(state, target); float sum = bolt::amp::inner_product(input_data, input_data+size, target_data, 0.0f, bolt::amp::plus<float>(), smoothl1_functor()); #endif if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct smoothl1_updateGradInput_functor { float norm; __host__ __device__ smoothl1_updateGradInput_functor() = default; __host__ __device__ smoothl1_updateGradInput_functor(float norm_) : norm(norm_) {} smoothl1_updateGradInput_functor(const smoothl1_updateGradInput_functor& fun) = default; __host__ __device__ float operator()(const float &x, const float &y) const { float z = x - y; if (z < -1.f) return -norm; else if (z > 1.f) return norm; else return norm * z; } }; void THNN_CudaSmoothL1Criterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage) { THCUNN_assertSameGPU(state, 3, input, target, gradInput); THArgCheck( THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2, "input and target need to have the same number of elements" ); long size = THCudaTensor_nElement(state, input); float norm = sizeAverage ? 
1./size : 1.; input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); #if THRUST_PATH thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par.on(THCState_getCurrentStream(state)), #endif input_data, input_data+size, target_data, gradInput_data, smoothl1_updateGradInput_functor(norm) ); #else auto input_data = THCudaTensor_data(state, input); auto target_data = THCudaTensor_data(state, target); auto gradInput_data = THCudaTensor_data(state, gradInput); bolt::amp::transform(input_data, input_data+size, target_data, gradInput_data, smoothl1_updateGradInput_functor(norm)); #endif THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
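As a quick cross-check of the two functors above: the forward pass computes 0.5*z*z for |x - y| < 1 and |x - y| - 0.5 otherwise, and the backward pass clamps the difference to the range [-1, 1] before scaling by norm. The same definitions evaluated on the host (a standalone sketch, not part of THCUNN):

#include <math.h>

// Host reference of the smooth-L1 value and gradient used by the functors above (sketch).
static float smoothl1_value(float x, float y)
{
    float z = fabsf(x - y);
    return z < 1.f ? 0.5f * z * z : z - 0.5f;
}

static float smoothl1_grad(float x, float y, float norm)
{
    float z = x - y;
    if (z < -1.f) return -norm;
    if (z >  1.f) return  norm;
    return norm * z;
}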
1dce33af3794b9022f77b7bbd2287760f3013557.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> // #include <helper_cuda.h> __global__ void HelloWorld(){ printf("Hello World\n"); int i = blockIdx.x * blockDim.x + threadIdx.x; printf("i = %d, thread %d, block %d\n", i, threadIdx.x, blockIdx.x); } int main(void){ hipError_t err = hipSuccess; printf("HelloWorld<<<1,5>>>();\n"); hipLaunchKernelGGL(( HelloWorld), dim3(1),dim3(5), 0, 0, ); printf("HelloWorld<<<2,5>>>();\n"); hipLaunchKernelGGL(( HelloWorld), dim3(2),dim3(5), 0, 0, ); printf("HelloWorld<<<2,5>>>();\n"); hipLaunchKernelGGL(( HelloWorld), dim3(2),dim3(5), 0, 0, ); if ((err = hipGetLastError()) != hipSuccess){ fprintf(stderr, "Failed to launch kernel: %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipDeviceSynchronize(); return 0; }
1dce33af3794b9022f77b7bbd2287760f3013557.cu
#include <stdio.h> #include <cuda_runtime.h> // #include <helper_cuda.h> __global__ void HelloWorld(){ printf("Hello World\n"); int i = blockIdx.x * blockDim.x + threadIdx.x; printf("i = %d, thread %d, block %d\n", i, threadIdx.x, blockIdx.x); } int main(void){ cudaError_t err = cudaSuccess; printf("HelloWorld<<<1,5>>>();\n"); HelloWorld<<<1,5>>>(); printf("HelloWorld<<<2,5>>>();\n"); HelloWorld<<<2,5>>>(); printf("HelloWorld<<<2,5>>>();\n"); HelloWorld<<<2,5>>>(); if ((err = cudaGetLastError()) != cudaSuccess){ fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaDeviceSynchronize(); return 0; }
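One caveat about the error handling in this example: cudaGetLastError (and hipGetLastError in the .hip version) only reports launch-time failures such as an invalid configuration; errors raised while the kernel executes surface on the next synchronizing call. A common pattern, sketched here rather than proposed as a required change, checks both:

// Check the launch status and the asynchronous execution status separately (sketch).
HelloWorld<<<2, 5>>>();
cudaError_t launchErr = cudaGetLastError();       // launch-time errors
cudaError_t syncErr   = cudaDeviceSynchronize();  // errors raised during execution
if (launchErr != cudaSuccess || syncErr != cudaSuccess) {
    fprintf(stderr, "Kernel failed: %s / %s\n",
            cudaGetErrorString(launchErr), cudaGetErrorString(syncErr));
}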
752f941e792efd73177bd9c62cda9b675746decf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand.h> #include <chrono> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <time.h> #define PRECISION double __global__ void monteCuda(PRECISION *counts, int num_iter, hiprandState_t *states) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int seed = idx; hiprand_init(seed, idx, 0, &states[idx]); int count = 0; PRECISION x, y, z; // Calculate PI following a Monte Carlo method for (int iter = 0; iter < num_iter; iter++) { // Generate random (X,Y) points x = hiprand_uniform(&states[idx]); y = hiprand_uniform(&states[idx]); z = sqrt((x*x) + (y*y)); // Check if point is in unit circle if (z <= 1.0) { count++; } } counts[idx] = ((PRECISION)count / (PRECISION)num_iter); } // returns if values successfully read or not. bool setValuesFromArgs(int argc, char **argv, unsigned int *block_size, unsigned int *num_threads, unsigned int *num_iter) { if (argc < 4) { printf("Incorrect parameters!\nUsage: %s <block size> <num threads> <iterations per thread>\n", *argv); return false; } char *s; *block_size = strtoul(argv[1], &s, 10); *num_threads = strtoul(argv[2], &s, 10); *num_iter = strtoul(argv[3], &s, 10); return true; } int main(int argc, char* argv[]) { unsigned int block_size, num_threads, num_iter; if(!setValuesFromArgs(argc, argv, &block_size, &num_threads, &num_iter)) return 0; bool bench = argc == 5; auto start = std::chrono::system_clock::now(); // Change num_threads to a multiple of block_size to prevent unexpected outcomes (memory size not matching up etc) num_threads = ((num_threads + block_size - 1) / block_size) * block_size; PRECISION count = 0.0; PRECISION pi; PRECISION *counts = (PRECISION*)malloc(num_threads * sizeof(PRECISION)); hiprandState_t *dev_random; hipMalloc(&dev_random, num_threads*sizeof(hiprandState_t)); PRECISION *p_counts = 0; hipMalloc(&p_counts, num_threads * sizeof(PRECISION)); hipLaunchKernelGGL(( monteCuda), dim3((num_threads + block_size - 1) / block_size), dim3(block_size), 0, 0, p_counts, num_iter, dev_random); hipDeviceSynchronize(); hipMemcpy(counts, p_counts, num_threads * sizeof(PRECISION), hipMemcpyDeviceToHost); // Estimate Pi and display the result for(int i = 0; i < num_threads; i++) { count += counts[i]; } pi = (count / (PRECISION)num_threads) * 4.0; auto end = std::chrono::system_clock::now(); std::chrono::duration<double> time = end-start; if(bench) { printf("%f %f\n", pi, time.count()); } else printf("The result is %f\n", pi); return 0; }
752f941e792efd73177bd9c62cda9b675746decf.cu
#include <curand_kernel.h> #include <curand.h> #include <chrono> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <time.h> #define PRECISION double __global__ void monteCuda(PRECISION *counts, int num_iter, curandState *states) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int seed = idx; curand_init(seed, idx, 0, &states[idx]); int count = 0; PRECISION x, y, z; // Calculate PI following a Monte Carlo method for (int iter = 0; iter < num_iter; iter++) { // Generate random (X,Y) points x = curand_uniform(&states[idx]); y = curand_uniform(&states[idx]); z = sqrt((x*x) + (y*y)); // Check if point is in unit circle if (z <= 1.0) { count++; } } counts[idx] = ((PRECISION)count / (PRECISION)num_iter); } // returns if values successfully read or not. bool setValuesFromArgs(int argc, char **argv, unsigned int *block_size, unsigned int *num_threads, unsigned int *num_iter) { if (argc < 4) { printf("Incorrect parameters!\nUsage: %s <block size> <num threads> <iterations per thread>\n", *argv); return false; } char *s; *block_size = strtoul(argv[1], &s, 10); *num_threads = strtoul(argv[2], &s, 10); *num_iter = strtoul(argv[3], &s, 10); return true; } int main(int argc, char* argv[]) { unsigned int block_size, num_threads, num_iter; if(!setValuesFromArgs(argc, argv, &block_size, &num_threads, &num_iter)) return 0; bool bench = argc == 5; auto start = std::chrono::system_clock::now(); // Change num_threads to a multiple of block_size to prevent unexpected outcomes (memory size not matching up etc) num_threads = ((num_threads + block_size - 1) / block_size) * block_size; PRECISION count = 0.0; PRECISION pi; PRECISION *counts = (PRECISION*)malloc(num_threads * sizeof(PRECISION)); curandState *dev_random; cudaMalloc(&dev_random, num_threads*sizeof(curandState)); PRECISION *p_counts = 0; cudaMalloc(&p_counts, num_threads * sizeof(PRECISION)); monteCuda<<<(num_threads + block_size - 1) / block_size, block_size>>>(p_counts, num_iter, dev_random); cudaDeviceSynchronize(); cudaMemcpy(counts, p_counts, num_threads * sizeof(PRECISION), cudaMemcpyDeviceToHost); // Estimate Pi and display the result for(int i = 0; i < num_threads; i++) { count += counts[i]; } pi = (count / (PRECISION)num_threads) * 4.0; auto end = std::chrono::system_clock::now(); std::chrono::duration<double> time = end-start; if(bench) { printf("%f %f\n", pi, time.count()); } else printf("The result is %f\n", pi); return 0; }
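For modest sample counts, the GPU estimate above can be sanity-checked against a host-only version of the same hit-counting scheme; the sketch below uses the C standard library generator instead of cuRAND, so the two runs will agree only statistically, not bit-for-bit.

#include <stdlib.h>

// Host-only reference estimate of pi using the same unit-circle test (sketch).
static double monte_carlo_pi_cpu(long long num_samples, unsigned int seed)
{
    srand(seed);
    long long hits = 0;
    for (long long i = 0; i < num_samples; i++) {
        double x = (double)rand() / RAND_MAX;
        double y = (double)rand() / RAND_MAX;
        if (x * x + y * y <= 1.0)
            hits++;
    }
    return 4.0 * (double)hits / (double)num_samples;
}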
8c67fd8ab1dbdafd83f4fa1d3645957678bc668e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x1; int xdim0_advec_mom_kernel_x1_h = -1; __constant__ int ydim0_advec_mom_kernel_x1; int ydim0_advec_mom_kernel_x1_h = -1; __constant__ int xdim1_advec_mom_kernel_x1; int xdim1_advec_mom_kernel_x1_h = -1; __constant__ int ydim1_advec_mom_kernel_x1; int ydim1_advec_mom_kernel_x1_h = -1; __constant__ int xdim2_advec_mom_kernel_x1; int xdim2_advec_mom_kernel_x1_h = -1; __constant__ int ydim2_advec_mom_kernel_x1; int ydim2_advec_mom_kernel_x1_h = -1; __constant__ int xdim3_advec_mom_kernel_x1; int xdim3_advec_mom_kernel_x1_h = -1; __constant__ int ydim3_advec_mom_kernel_x1; int ydim3_advec_mom_kernel_x1_h = -1; __constant__ int xdim4_advec_mom_kernel_x1; int xdim4_advec_mom_kernel_x1_h = -1; __constant__ int ydim4_advec_mom_kernel_x1; int ydim4_advec_mom_kernel_x1_h = -1; __constant__ int xdim5_advec_mom_kernel_x1; int xdim5_advec_mom_kernel_x1_h = -1; __constant__ int ydim5_advec_mom_kernel_x1; int ydim5_advec_mom_kernel_x1_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_x1*(y)+xdim0_advec_mom_kernel_x1*ydim0_advec_mom_kernel_x1*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_x1*(y)+xdim1_advec_mom_kernel_x1*ydim1_advec_mom_kernel_x1*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_x1*(y)+xdim2_advec_mom_kernel_x1*ydim2_advec_mom_kernel_x1*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_x1*(y)+xdim3_advec_mom_kernel_x1*ydim3_advec_mom_kernel_x1*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel_x1*(y)+xdim4_advec_mom_kernel_x1*ydim4_advec_mom_kernel_x1*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_advec_mom_kernel_x1*(y)+xdim5_advec_mom_kernel_x1*ydim5_advec_mom_kernel_x1*(z)) //user function __device__ inline void advec_mom_kernel_x1( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x, const double *vol_flux_y, const double *vol_flux_z) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)] + vol_flux_z[OPS_ACC5(0,0,1)] - vol_flux_z[OPS_ACC5(0,0,0)]; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_advec_mom_kernel_x1( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, const double* __restrict arg5, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_mom_kernel_x1 + idx_z * 1 * xdim0_advec_mom_kernel_x1 * ydim0_advec_mom_kernel_x1; arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_mom_kernel_x1 + idx_z * 1 * xdim1_advec_mom_kernel_x1 * ydim1_advec_mom_kernel_x1; arg2 += idx_x * 1 + idx_y * 1 * xdim2_advec_mom_kernel_x1 + idx_z * 1 * xdim2_advec_mom_kernel_x1 * ydim2_advec_mom_kernel_x1; arg3 += idx_x * 1 + idx_y * 1 * xdim3_advec_mom_kernel_x1 + idx_z * 1 * xdim3_advec_mom_kernel_x1 * ydim3_advec_mom_kernel_x1; arg4 += idx_x * 1 + idx_y * 1 * xdim4_advec_mom_kernel_x1 + idx_z * 1 * xdim4_advec_mom_kernel_x1 * ydim4_advec_mom_kernel_x1; arg5 += idx_x * 1 + idx_y * 1 * xdim5_advec_mom_kernel_x1 + idx_z * 1 * xdim5_advec_mom_kernel_x1 * ydim5_advec_mom_kernel_x1; if (idx_x < size0 
&& idx_y < size1 && idx_z < size2) { advec_mom_kernel_x1(arg0, arg1, arg2, arg3, arg4, arg5); } } // host stub function void ops_par_loop_advec_mom_kernel_x1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5}; ops_timing_realloc(11,"advec_mom_kernel_x1"); OPS_kernels[11].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_advec_mom_kernel_x1_h || ydim0 != ydim0_advec_mom_kernel_x1_h || xdim1 != xdim1_advec_mom_kernel_x1_h || ydim1 != ydim1_advec_mom_kernel_x1_h || xdim2 != xdim2_advec_mom_kernel_x1_h || ydim2 != ydim2_advec_mom_kernel_x1_h || xdim3 != xdim3_advec_mom_kernel_x1_h || ydim3 != ydim3_advec_mom_kernel_x1_h || xdim4 != xdim4_advec_mom_kernel_x1_h || ydim4 != ydim4_advec_mom_kernel_x1_h || xdim5 != xdim5_advec_mom_kernel_x1_h || ydim5 != ydim5_advec_mom_kernel_x1_h) { hipMemcpyToSymbol( xdim0_advec_mom_kernel_x1, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_x1_h = xdim0; hipMemcpyToSymbol( ydim0_advec_mom_kernel_x1, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_x1_h = ydim0; hipMemcpyToSymbol( xdim1_advec_mom_kernel_x1, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_x1_h = xdim1; hipMemcpyToSymbol( ydim1_advec_mom_kernel_x1, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_x1_h = ydim1; hipMemcpyToSymbol( xdim2_advec_mom_kernel_x1, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_x1_h = xdim2; hipMemcpyToSymbol( ydim2_advec_mom_kernel_x1, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_x1_h = ydim2; hipMemcpyToSymbol( xdim3_advec_mom_kernel_x1, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_x1_h = xdim3; hipMemcpyToSymbol( ydim3_advec_mom_kernel_x1, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_x1_h = ydim3; hipMemcpyToSymbol( xdim4_advec_mom_kernel_x1, &xdim4, sizeof(int) ); xdim4_advec_mom_kernel_x1_h = xdim4; hipMemcpyToSymbol( ydim4_advec_mom_kernel_x1, &ydim4, sizeof(int) ); ydim4_advec_mom_kernel_x1_h = ydim4; hipMemcpyToSymbol( xdim5_advec_mom_kernel_x1, &xdim5, sizeof(int) ); 
xdim5_advec_mom_kernel_x1_h = xdim5; hipMemcpyToSymbol( ydim5_advec_mom_kernel_x1, &ydim5, sizeof(int) ); ydim5_advec_mom_kernel_x1_h = ydim5; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - 
args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args,6,range); ops_timers_core(&c1,&t1); OPS_kernels[11].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_mom_kernel_x1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[11].time += t2-t1; ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg5); }
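The OPS_ACCn macros and the base0..base5 offset calculations in this generated loop all encode the same row-major 3D flattening: a relative (x, y, z) stencil offset maps to x + xdim*y + xdim*ydim*z elements from the current point. In isolation (names here are illustrative):

// Row-major 3D flattening used by the OPS_ACCn macros (sketch).
static inline int flat_index_3d(int x, int y, int z, int xdim, int ydim)
{
    return x + xdim * y + xdim * ydim * z;
}
// Example: vol_flux_y[OPS_ACC4(0,1,0)] reads one row above the current point,
// i.e. an offset of 0 + xdim4 * 1 + xdim4 * ydim4 * 0 elements.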
8c67fd8ab1dbdafd83f4fa1d3645957678bc668e.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x1; int xdim0_advec_mom_kernel_x1_h = -1; __constant__ int ydim0_advec_mom_kernel_x1; int ydim0_advec_mom_kernel_x1_h = -1; __constant__ int xdim1_advec_mom_kernel_x1; int xdim1_advec_mom_kernel_x1_h = -1; __constant__ int ydim1_advec_mom_kernel_x1; int ydim1_advec_mom_kernel_x1_h = -1; __constant__ int xdim2_advec_mom_kernel_x1; int xdim2_advec_mom_kernel_x1_h = -1; __constant__ int ydim2_advec_mom_kernel_x1; int ydim2_advec_mom_kernel_x1_h = -1; __constant__ int xdim3_advec_mom_kernel_x1; int xdim3_advec_mom_kernel_x1_h = -1; __constant__ int ydim3_advec_mom_kernel_x1; int ydim3_advec_mom_kernel_x1_h = -1; __constant__ int xdim4_advec_mom_kernel_x1; int xdim4_advec_mom_kernel_x1_h = -1; __constant__ int ydim4_advec_mom_kernel_x1; int ydim4_advec_mom_kernel_x1_h = -1; __constant__ int xdim5_advec_mom_kernel_x1; int xdim5_advec_mom_kernel_x1_h = -1; __constant__ int ydim5_advec_mom_kernel_x1; int ydim5_advec_mom_kernel_x1_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_x1*(y)+xdim0_advec_mom_kernel_x1*ydim0_advec_mom_kernel_x1*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_x1*(y)+xdim1_advec_mom_kernel_x1*ydim1_advec_mom_kernel_x1*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_x1*(y)+xdim2_advec_mom_kernel_x1*ydim2_advec_mom_kernel_x1*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_x1*(y)+xdim3_advec_mom_kernel_x1*ydim3_advec_mom_kernel_x1*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel_x1*(y)+xdim4_advec_mom_kernel_x1*ydim4_advec_mom_kernel_x1*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_advec_mom_kernel_x1*(y)+xdim5_advec_mom_kernel_x1*ydim5_advec_mom_kernel_x1*(z)) //user function __device__ inline void advec_mom_kernel_x1( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x, const double *vol_flux_y, const double *vol_flux_z) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)] + vol_flux_z[OPS_ACC5(0,0,1)] - vol_flux_z[OPS_ACC5(0,0,0)]; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_advec_mom_kernel_x1( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, const double* __restrict arg5, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_mom_kernel_x1 + idx_z * 1 * xdim0_advec_mom_kernel_x1 * ydim0_advec_mom_kernel_x1; arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_mom_kernel_x1 + idx_z * 1 * xdim1_advec_mom_kernel_x1 * ydim1_advec_mom_kernel_x1; arg2 += idx_x * 1 + idx_y * 1 * xdim2_advec_mom_kernel_x1 + idx_z * 1 * xdim2_advec_mom_kernel_x1 * ydim2_advec_mom_kernel_x1; arg3 += idx_x * 1 + idx_y * 1 * xdim3_advec_mom_kernel_x1 + idx_z * 1 * xdim3_advec_mom_kernel_x1 * ydim3_advec_mom_kernel_x1; arg4 += idx_x * 1 + idx_y * 1 * xdim4_advec_mom_kernel_x1 + idx_z * 1 * xdim4_advec_mom_kernel_x1 * ydim4_advec_mom_kernel_x1; arg5 += idx_x * 1 + idx_y * 1 * xdim5_advec_mom_kernel_x1 + idx_z * 1 * xdim5_advec_mom_kernel_x1 * ydim5_advec_mom_kernel_x1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x1(arg0, arg1, arg2, arg3, arg4, 
arg5); } } // host stub function void ops_par_loop_advec_mom_kernel_x1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5}; ops_timing_realloc(11,"advec_mom_kernel_x1"); OPS_kernels[11].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_advec_mom_kernel_x1_h || ydim0 != ydim0_advec_mom_kernel_x1_h || xdim1 != xdim1_advec_mom_kernel_x1_h || ydim1 != ydim1_advec_mom_kernel_x1_h || xdim2 != xdim2_advec_mom_kernel_x1_h || ydim2 != ydim2_advec_mom_kernel_x1_h || xdim3 != xdim3_advec_mom_kernel_x1_h || ydim3 != ydim3_advec_mom_kernel_x1_h || xdim4 != xdim4_advec_mom_kernel_x1_h || ydim4 != ydim4_advec_mom_kernel_x1_h || xdim5 != xdim5_advec_mom_kernel_x1_h || ydim5 != ydim5_advec_mom_kernel_x1_h) { cudaMemcpyToSymbol( xdim0_advec_mom_kernel_x1, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_x1_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_mom_kernel_x1, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_x1_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_mom_kernel_x1, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_x1_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_mom_kernel_x1, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_x1_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_mom_kernel_x1, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_x1_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_mom_kernel_x1, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_x1_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_mom_kernel_x1, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_x1_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_mom_kernel_x1, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_x1_h = ydim3; cudaMemcpyToSymbol( xdim4_advec_mom_kernel_x1, &xdim4, sizeof(int) ); xdim4_advec_mom_kernel_x1_h = xdim4; cudaMemcpyToSymbol( ydim4_advec_mom_kernel_x1, &ydim4, sizeof(int) ); ydim4_advec_mom_kernel_x1_h = ydim4; cudaMemcpyToSymbol( xdim5_advec_mom_kernel_x1, &xdim5, sizeof(int) ); xdim5_advec_mom_kernel_x1_h = xdim5; cudaMemcpyToSymbol( ydim5_advec_mom_kernel_x1, &ydim5, 
sizeof(int) ); ydim5_advec_mom_kernel_x1_h = ydim5; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d 
< dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args,6,range); ops_timers_core(&c1,&t1); OPS_kernels[11].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_advec_mom_kernel_x1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[11].time += t2-t1; ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[11].transfer += ops_compute_transfer(dim, range, &arg5); }
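The OPS_ACC macros and the per-thread pointer arithmetic in the generated kernel above are two spellings of the same row-major 3D linearization, and the host stub sizes its grid by tiling x and y with OPS_block_size_x/y while giving z one grid block per plane. A minimal host-only sketch of that addressing and grid math; every name here is an illustrative stand-in, not part of the OPS-generated code.

// Minimal sketch of the OPS-style 3D addressing and grid sizing used above.
// xdim/ydim stand in for the per-argument pitch constants copied to the device
// with cudaMemcpyToSymbol; all values are made up for illustration.
#include <cassert>
#include <cstdio>

// Macro form: flat offset of element (x, y, z) in a padded row-major block.
#define ACC(x, y, z, xdim, ydim) ((x) + (xdim) * (y) + (xdim) * (ydim) * (z))

int main() {
    const int xdim = 10, ydim = 8;      // padded x/y extents of one dataset
    const int ix = 3, iy = 2, iz = 5;   // a sample grid point

    // Pointer-bump form used by the kernel wrapper: advance the base pointer by
    // the thread's (idx_x, idx_y, idx_z), then index with ACC(0, 0, 0).
    const long long bump = ix * 1 + iy * 1 * xdim + iz * 1 * xdim * ydim;
    assert(bump + ACC(0, 0, 0, xdim, ydim) == ACC(ix, iy, iz, xdim, ydim));

    // Grid sizing mirrors the host stub: x and y are tiled by the 2D block,
    // z gets one grid block per plane (block depth 1).
    const int OPS_block_size_x = 32, OPS_block_size_y = 4;
    const int x_size = 100, y_size = 60, z_size = 40;
    dim3 grid((x_size - 1) / OPS_block_size_x + 1,
              (y_size - 1) / OPS_block_size_y + 1,
              z_size);
    dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
    std::printf("grid=(%u,%u,%u) block=(%u,%u,%u)\n",
                grid.x, grid.y, grid.z, tblock.x, tblock.y, tblock.z);
    return 0;
}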
7d1062f52a382360a61cf82bc6d3aa416816b13d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaUtils_hip.cuh"
#include "MedianFilter.h"
#include "CudaTimer.h"

#include <iostream>

__global__ void medianKernel(
    uint8* inputImage,
    uint8* outputImage,
    int channels,
    int height,
    int width
) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= width) {
        return;
    }
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int ch = blockIdx.z * blockDim.z + threadIdx.z;

    int radius = WIN_SIZE / 2;
    uint8 arr[WIN_SIZE * WIN_SIZE];
    int ind = 0;

    // Clamp the window to the image; the upper bounds use width - 1 / height - 1
    // so the inclusive loops below never read past the last row or column.
    int xFrom = max(x - radius, 0);
    int xTo = min(x + radius, width - 1);
    int yFrom = max(y - radius, 0);
    int yTo = min(y + radius, height - 1);

    // Gather the clamped window for this pixel and channel.
    for (int dx = xFrom; dx <= xTo; ++dx) {
        for (int dy = yFrom; dy <= yTo; ++dy) {
            arr[ind++] = inputImage[dy * width * channels + dx * channels + ch];
        }
    }

    // Bubble-sort the window and take the middle element as the median.
    uint8 temp;
    for (int i = 0; i < ind - 1; i++) {
        for (int j = 0; j < ind - i - 1; j++) {
            if (arr[j] > arr[j + 1]) {
                temp = arr[j];
                arr[j] = arr[j + 1];
                arr[j + 1] = temp;
            }
        }
    }

    outputImage[y * width * channels + x * channels + ch] = arr[ind / 2];
}

void MedianFilterCUDA(
    uint8* inputImage,
    uint8* outputImage,
    int height,
    int width,
    int channels,
    double& elapsed,
    double& kernelElapsed
) {
    uint8* devInputImage = NULL;
    uint8* devOutputImage = NULL;
    long imageSizeInBytes = height * width * channels;

    CudaTimer timer = CudaTimer();

    hipMalloc((void**)&devInputImage, imageSizeInBytes);
    hipMalloc((void**)&devOutputImage, imageSizeInBytes);
    hipMemcpy(devInputImage, inputImage, imageSizeInBytes, hipMemcpyHostToDevice);

    // One thread per pixel in x, one grid block per row and per channel.
    int blockSize = min(1024, width);
    dim3 gridSize((width + blockSize - 1) / blockSize, height, channels);

    CudaTimer kernelTimer = CudaTimer();
    SAFE_KERNEL_CALL(( hipLaunchKernelGGL(( medianKernel) , dim3(gridSize), dim3(blockSize), 0, 0,
        devInputImage,
        devOutputImage,
        channels,
        height,
        width
    ) ));
    kernelElapsed = kernelTimer.stop();

    hipMemcpy(outputImage, devOutputImage, imageSizeInBytes, hipMemcpyDeviceToHost);
    hipFree(devInputImage);
    hipFree(devOutputImage);

    elapsed = timer.stop();
}
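Apart from the added hip_runtime.h include, the renamed CudaUtils header and the hip*-prefixed runtime calls, the main difference between this hipified file and the original .cu below is the kernel launch: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. A small sketch of that mapping; dummyKernel is an illustrative stand-in with a simpler argument list, and the HIP form is shown only in a comment because the snippet is written as plain CUDA.

// Stand-in kernel for illustrating the launch-syntax mapping above.
__global__ void dummyKernel(int* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1;
}

void launchBothWays(int* devData, int n) {
    dim3 grid((n + 255) / 256);
    dim3 block(256);

    // Original CUDA form, as in the .cu file that follows:
    dummyKernel<<<grid, block>>>(devData, n);

    // hipify's rewrite, as in the .hip file above, would read:
    //   hipLaunchKernelGGL((dummyKernel), dim3(grid), dim3(block),
    //                      0 /* dynamic shared memory */, 0 /* stream */,
    //                      devData, n);
    // i.e. grid, block, shared-memory bytes and stream become ordinary arguments.
}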
7d1062f52a382360a61cf82bc6d3aa416816b13d.cu
#include "CudaUtils.cuh"
#include "MedianFilter.h"
#include "CudaTimer.h"

#include <iostream>

__global__ void medianKernel(
    uint8* inputImage,
    uint8* outputImage,
    int channels,
    int height,
    int width
) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= width) {
        return;
    }
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int ch = blockIdx.z * blockDim.z + threadIdx.z;

    int radius = WIN_SIZE / 2;
    uint8 arr[WIN_SIZE * WIN_SIZE];
    int ind = 0;

    // Clamp the window to the image; the upper bounds use width - 1 / height - 1
    // so the inclusive loops below never read past the last row or column.
    int xFrom = max(x - radius, 0);
    int xTo = min(x + radius, width - 1);
    int yFrom = max(y - radius, 0);
    int yTo = min(y + radius, height - 1);

    // Gather the clamped window for this pixel and channel.
    for (int dx = xFrom; dx <= xTo; ++dx) {
        for (int dy = yFrom; dy <= yTo; ++dy) {
            arr[ind++] = inputImage[dy * width * channels + dx * channels + ch];
        }
    }

    // Bubble-sort the window and take the middle element as the median.
    uint8 temp;
    for (int i = 0; i < ind - 1; i++) {
        for (int j = 0; j < ind - i - 1; j++) {
            if (arr[j] > arr[j + 1]) {
                temp = arr[j];
                arr[j] = arr[j + 1];
                arr[j + 1] = temp;
            }
        }
    }

    outputImage[y * width * channels + x * channels + ch] = arr[ind / 2];
}

void MedianFilterCUDA(
    uint8* inputImage,
    uint8* outputImage,
    int height,
    int width,
    int channels,
    double& elapsed,
    double& kernelElapsed
) {
    uint8* devInputImage = NULL;
    uint8* devOutputImage = NULL;
    long imageSizeInBytes = height * width * channels;

    CudaTimer timer = CudaTimer();

    cudaMalloc((void**)&devInputImage, imageSizeInBytes);
    cudaMalloc((void**)&devOutputImage, imageSizeInBytes);
    cudaMemcpy(devInputImage, inputImage, imageSizeInBytes, cudaMemcpyHostToDevice);

    // One thread per pixel in x, one grid block per row and per channel.
    int blockSize = min(1024, width);
    dim3 gridSize((width + blockSize - 1) / blockSize, height, channels);

    CudaTimer kernelTimer = CudaTimer();
    SAFE_KERNEL_CALL(( medianKernel <<<gridSize, blockSize>>> (
        devInputImage,
        devOutputImage,
        channels,
        height,
        width
    ) ));
    kernelElapsed = kernelTimer.stop();

    cudaMemcpy(outputImage, devOutputImage, imageSizeInBytes, cudaMemcpyDeviceToHost);
    cudaFree(devInputImage);
    cudaFree(devOutputImage);

    elapsed = timer.stop();
}
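The kernel above gathers a clamped WIN_SIZE x WIN_SIZE window per pixel and channel, sorts it, and writes the middle element. A host-side reference of the same clamped-window median can be used to spot-check the GPU output on a few pixels; it is only a sketch, with uint8_ref and WIN_SIZE_REF standing in for the project's uint8 and WIN_SIZE (assumed here to be unsigned char and a small odd window such as 3).

#include <algorithm>
#include <vector>

// Validation-only helpers; these names are stand-ins, not part of the project.
typedef unsigned char uint8_ref;
static const int WIN_SIZE_REF = 3;   // assumed odd window size

// Median of the clamped WIN_SIZE_REF x WIN_SIZE_REF window around (x, y) for
// channel ch, mirroring the gather loop in medianKernel above.
uint8_ref referenceMedianAt(const uint8_ref* img, int width, int height,
                            int channels, int x, int y, int ch) {
    const int radius = WIN_SIZE_REF / 2;
    const int xFrom = std::max(x - radius, 0), xTo = std::min(x + radius, width - 1);
    const int yFrom = std::max(y - radius, 0), yTo = std::min(y + radius, height - 1);

    std::vector<uint8_ref> window;
    for (int dx = xFrom; dx <= xTo; ++dx)
        for (int dy = yFrom; dy <= yTo; ++dy)
            window.push_back(img[dy * width * channels + dx * channels + ch]);

    // nth_element is enough to place the median at the middle position.
    std::nth_element(window.begin(), window.begin() + window.size() / 2, window.end());
    return window[window.size() / 2];
}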
3a723ab4b008d894a217942ee30506cbbe304263.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Volta GPU. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. 
We call this as epilogue of kernel. Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = float. As we want to MMA instructions on Volta and they support only half-precision floating point (fp16 or half), we use data type for elements in input matrix A and B as cutlass::half_t. Volta also supports accumulation of partial dot product to fp32, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (float), ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t), ElementOutput (float). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x32, 64x64x32, 8x8x4 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, initialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a CTA. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memoroy load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. 
These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::Gemm template. The next step is to initialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to initialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm70; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, N = 8, K = 4 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes ? using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. 
This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (props.major != 7) { std::cerr << "Volta Tensor Ops must be run on a machine with compute capability of 70, 72, or 75." << std::endl; // Return 0 so tests are considered passing if run on unsupported architectures or CUDA Toolkits. return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // Volta Tensor Core operations exposed with mma.sync are first available in CUDA 10.1. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl; // Returning zero when built on older Toolkits so tests pass. The actions of this SDK example are no-op. return 0; } else { return run(); } }
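The comments above describe the computation as D = alpha * A * B + beta * C, with the alpha/beta linear combination applied as an epilogue after the A * B accumulation. A deliberately naive CUDA kernel for the same formula, using the same layouts (column-major A, row-major B and C/D) and fp16 inputs with fp32 accumulation, makes that split concrete; it is an illustration only and bears no relation to the tensor-core kernel the example instantiates.

#include <cuda_fp16.h>

// Naive D = alpha * A * B + beta * C with half inputs and float accumulation.
// A is column-major (leading dimension M), B row-major (leading dimension N),
// C and D row-major (leading dimension N), matching the layouts chosen above.
__global__ void referenceGemm(int M, int N, int K, float alpha,
                              const __half* A, const __half* B,
                              float beta, const float* C, float* D) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M || col >= N) return;

    // "Main loop": accumulate the A * B dot product in fp32.
    float acc = 0.0f;
    for (int k = 0; k < K; ++k)
        acc += __half2float(A[row + k * M]) * __half2float(B[k * N + col]);

    // "Epilogue": linear combination with the source matrix C.
    D[row * N + col] = alpha * acc + beta * C[row * N + col];
}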
3a723ab4b008d894a217942ee30506cbbe304263.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run matrix multiplication kernels using functions and data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA Volta GPU. Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing high performance kernels at scale which works for multiple problem sizes with good abstractions is really hard. CUTLASS solves this problem by providing simplified abstractions to compose multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU easily. CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp and thread-block level, they compute on their own tile-size with higher level of tile sizes being composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute threadblock-tile (tile size computed by a threadblock). In thie example, we split variable initialization into 1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties : describes how the above set matrices will be used to compute output of matrix multiplication. First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B) and C. We call this as epilogue of kernel. 
Hence, we setup data types for alpha and beta to be equal to ElementComputeEpilogue = float. As we want to MMA instructions on Volta and they support only half-precision floating point (fp16 or half), we use data type for elements in input matrix A and B as cutlass::half_t. Volta also supports accumulation of partial dot product to fp32, which can store wider range of numbers, we use it as data type of output matrix elements and accumulation. We convey this to CUTLASS kernel by initializing template variables ElementAccumulator (float), ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB (cutlass::half_t), ElementOutput (float). Communicating just the data type is not enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the data type of output ElementOutput (int32_t), the number of elements per vector memory access (16), data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X + beta * C). Now that we setup the properties of data, we have to setup properties of computation. Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x32, 64x64x32, 8x8x4 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads needed per thread-block, amount of shared memory, storing data in bank-conflict free manner, and ton of other variables required to compose, initialize and launch a high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from understanding and coding complicated hardware optimizations which can easily go wrong. CUTLASS also supports multiple MMA pipelines in a CTA. What are MMA pipelines? MMA pipelines constitute the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, store to global memory. The below flow sequence shows a typical mma pipeline. matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers -> output to global memory The problem with single pipeline is, each stage is synchronous which means, each stage has to wait until the previous finished executing. There are stages in the pipeline which do not have fixed latency, for example, the loads from global memory and shared memory. Therefore, we can add one more pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads. Finally, the pipeline in a kernel looks like (1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to global memory This way, you can hide the second global memoroy load latency by doing computation on already loaded input data. There are few more template variables initialized such as, which threadblock tile of output matrix is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on. 
These are all put together to create a template variable which describes CUTLASS GEMM kernel using cutlass::gemm::device::Gemm template. The next step is to initialize physical data, instantiate and initialize CUTLASS kernel and run it. We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come in the way of learning CUTLASS. Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space memory required by the kernel we instantiated. If yes, we create it and pass it along with other arguments created to initialize CUTLASS kernel then, the kernel is launched. In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if the output from CUTLASS kernel is same as reference GEMM kernel. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm70; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, N = 8, K = 4 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes ? using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per // vectorized memory access. For half // precision, it's 8 elements. 
This becomes // the vector width of math instructions in // epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 2; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major != 7) { std::cerr << "Volta Tensor Ops must be run on a machine with compute capability of 70, 72, or 75." << std::endl; // Return 0 so tests are considered passing if run on unsupported architectures or CUDA Toolkits. return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // Volta Tensor Core operations exposed with mma.sync are first available in CUDA 10.1. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl; // Returning zero when built on older Toolkits so tests pass. The actions of this SDK example are no-op. return 0; } else { return run(); } }
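The tile shapes picked in this example (threadblock 128x128x32, warp 64x64x32, MMA op 8x8x4) fix how many warp tiles cover a threadblock tile and how many 8x8x4 MMA tiles cover a warp tile per K-slice. The compile-time checks below only spell out that shape arithmetic, copied from the constants above; they are not a statement about how CUTLASS actually schedules the instructions.

// Shape arithmetic for the tile hierarchy chosen above.
constexpr int kThreadblockM = 128, kThreadblockN = 128, kThreadblockK = 32;
constexpr int kWarpM = 64, kWarpN = 64, kWarpK = 32;
constexpr int kMmaM = 8, kMmaN = 8, kMmaK = 4;

// The 128x128 threadblock tile is covered by 2 x 2 = 4 warp tiles, i.e. 128 threads.
constexpr int kWarpsPerBlock = (kThreadblockM / kWarpM) * (kThreadblockN / kWarpN);
static_assert(kWarpsPerBlock == 4, "2 x 2 warp tiles per threadblock tile");
static_assert(kWarpsPerBlock * 32 == 128, "threads per threadblock");

// The warp K extent equals the threadblock K extent, so warps do not split K here.
static_assert(kThreadblockK / kWarpK == 1, "no K split across warps");

// Each 64x64x32 warp tile is covered by (64/8) * (64/8) * (32/4) = 512
// MMA-shaped 8x8x4 tiles per threadblock K-slice.
constexpr int kMmaTilesPerWarpTile =
    (kWarpM / kMmaM) * (kWarpN / kMmaN) * (kWarpK / kMmaK);
static_assert(kMmaTilesPerWarpTile == 512, "MMA tiles per warp tile per K-slice");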
637b0fa8d9107bfe966ee73aaadaf7891bff8d8a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE) //support for 64 bit shuffles static __inline__ __device__ float real_shfl(float var, int srcLane) { return SHFL(var, srcLane); } static __inline__ __device__ float real_shfl(int var, int srcLane) { return SHFL(var, srcLane); } static __inline__ __device__ double real_shfl(double var, int srcLane) { int hi, lo; asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); hi = SHFL(hi, srcLane); lo = SHFL(lo, srcLane); return __hiloint2double( hi, lo ); } static __inline__ __device__ long long real_shfl(long long var, int srcLane) { int hi, lo; asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); hi = SHFL(hi, srcLane); lo = SHFL(lo, srcLane); // unforunately there isn't an __nv_hiloint2long(hi,lo) intrinsic cast int2 fuse; fuse.x = lo; fuse.y = hi; return *reinterpret_cast<long long*>(&fuse); } /** * Save the force on a single atom. */ __device__ void saveSingleForce(int atom, real3 force, unsigned long long* forceBuffers) { if (force.x != 0) atomicAdd(&forceBuffers[atom], static_cast<unsigned long long>(realToFixedPoint(force.x))); if (force.y != 0) atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.y))); if (force.z != 0) atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.z))); } /** * Compute nonbonded interactions. The kernel is separated into two parts, * tiles with exclusions and tiles without exclusions. It relies heavily on * implicit warp-level synchronization. A tile is defined by two atom blocks * each of warpsize. Each warp computes a range of tiles. * * Tiles with exclusions compute the entire set of interactions across * atom blocks, equal to warpsize*warpsize. In order to avoid access conflicts * the forces are computed and accumulated diagonally in the manner shown below * where, suppose * * [a-h] comprise atom block 1, [i-p] comprise atom block 2 * * 1 denotes the first set of calculations within the warp * 2 denotes the second set of calculations within the warp * ... etc. * * threads * 0 1 2 3 4 5 6 7 * atom1 * L a b c d e f g h * o i 1 2 3 4 5 6 7 8 * c j 8 1 2 3 4 5 6 7 * a k 7 8 1 2 3 4 5 6 * l l 6 7 8 1 2 3 4 5 * D m 5 6 7 8 1 2 3 4 * a n 4 5 6 7 8 1 2 3 * t o 3 4 5 6 7 8 1 2 * a p 2 3 4 5 6 7 8 1 * * Tiles without exclusions read off directly from the neighbourlist interactingAtoms * and follows the same force accumulation method. If more there are more interactingTiles * than the size of the neighbourlist initially allocated, the neighbourlist is rebuilt * and the full tileset is computed. This should happen on the first step, and very rarely * afterwards. * * On CUDA devices that support the shuffle intrinsic, on diagonal exclusion tiles use * __shfl to broadcast. For all other types of tiles __shfl is used to pass around the * forces, positions, and parameters when computing the forces. 
* * [out]forceBuffers - forces on each atom to eventually be accumulated * [out]energyBuffer - energyBuffer to eventually be accumulated * [in]posq - x,y,z,charge * [in]exclusions - 1024-bit flags denoting atom-atom exclusions for each tile * [in]exclusionTiles - x,y denotes the indices of tiles that have an exclusion * [in]startTileIndex - index into first tile to be processed * [in]numTileIndices - number of tiles this context is responsible for processing * [in]int tiles - the atom block for each tile * [in]interactionCount - total number of tiles that have an interaction * [in]maxTiles - stores the size of the neighbourlist in case it needs * - to be expanded * [in]periodicBoxSize - size of the Periodic Box, last dimension (w) not used * [in]invPeriodicBox - inverse of the periodicBoxSize, pre-computed for speed * [in]blockCenter - the center of each block in euclidean coordinates * [in]blockSize - size of the each block, radiating from the center * - x is half the distance of total length * - y is half the distance of total width * - z is half the distance of total height * - w is not used * [in]interactingAtoms - a list of interactions within a given tile * */ extern "C" __global__ void computeNonbonded( unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const tileflags* __restrict__ exclusions, const int2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned long long numTileIndices #ifdef USE_CUTOFF , const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter, const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms, unsigned int maxSinglePairs, const int2* __restrict__ singlePairs #endif PARAMETER_ARGUMENTS) { const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp const unsigned int tbx = threadIdx.x - tgx; // block warpIndex mixed energy = 0; INIT_DERIVATIVES // First loop: process tiles that contain exclusions. const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) { const int2 tileIndices = exclusionTiles[pos]; const unsigned int x = tileIndices.x; const unsigned int y = tileIndices.y; real3 force = make_real3(0); unsigned int atom1 = x*TILE_SIZE + tgx; real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_EXCLUSIONS tileflags excl = exclusions[pos*TILE_SIZE+tgx]; #endif const bool hasExclusions = true; if (x == y) { // This tile is on the diagonal. 
real4 shflPosq = posq1; // we do not need to fetch parameters from global since this is a symmetric tile // instead we can broadcast the values using shuffle for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+j; real4 posq2; BROADCAST_WARP_DATA real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+j; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1)); #endif real tempEnergy = 0.0f; const real interactionScale = 0.5f; COMPUTE_INTERACTION energy += 0.5f*tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC force.x -= delta.x*dEdR; force.y -= delta.y*dEdR; force.z -= delta.z*dEdR; #else force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; #endif #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif } } else { // This is an off-diagonal tile. unsigned int j = y*TILE_SIZE + tgx; real4 shflPosq = posq[j]; real3 shflForce; shflForce.x = 0.0f; shflForce.y = 0.0f; shflForce.z = 0.0f; DECLARE_LOCAL_PARAMETERS LOAD_LOCAL_PARAMETERS_FROM_GLOBAL #ifdef USE_EXCLUSIONS excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx)); #endif unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = shflPosq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+tj; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1)); #endif real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; shflForce.x += delta.x; shflForce.y += delta.y; shflForce.z += delta.z; #else // !USE_SYMMETRIC force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; shflForce.x += dEdR2.x; shflForce.y += dEdR2.y; shflForce.z += dEdR2.z; #endif // end USE_SYMMETRIC #endif SHUFFLE_WARP_DATA #ifdef USE_EXCLUSIONS excl >>= 1; #endif // cycles the indices // 0 1 2 3 4 5 6 7 -> 1 2 3 4 5 6 7 0 tj = (tj + 1) & (TILE_SIZE - 1); } const unsigned int offset = y*TILE_SIZE + tgx; // write results for off diagonal tiles #ifdef INCLUDE_FORCES atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>(realToFixedPoint(shflForce.x))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.y))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.z))); #endif } // Write results for on and off diagonal tiles #ifdef INCLUDE_FORCES const unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>(realToFixedPoint(force.x))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.y))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.z))); 
#endif } // Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all // of them (no cutoff). #ifdef USE_NEIGHBOR_LIST const unsigned int numTiles = interactionCount[0]; if (numTiles > maxTiles) return; // There wasn't enough memory for the neighbor list. int pos = (int) (warp*(long long)numTiles/totalWarps); int end = (int) ((warp+1)*(long long)numTiles/totalWarps); #else int pos = (int) (startTileIndex+warp*numTileIndices/totalWarps); int end = (int) (startTileIndex+(warp+1)*numTileIndices/totalWarps); int skipBase = 0; int currentSkipIndex = tbx; __shared__ volatile int skipTiles[THREAD_BLOCK_SIZE]; skipTiles[threadIdx.x] = -1; #endif // atomIndices can probably be shuffled as well // but it probably wouldn't make things any faster __shared__ int atomIndices[THREAD_BLOCK_SIZE]; while (pos < end) { const bool hasExclusions = false; real3 force = make_real3(0); bool includeTile = true; // Extract the coordinates of this tile. int x, y; bool singlePeriodicCopy = false; #ifdef USE_NEIGHBOR_LIST x = tiles[pos]; real4 blockSizeX = blockSize[x]; singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= MAX_CUTOFF && 0.5f*periodicBoxSize.y-blockSizeX.y >= MAX_CUTOFF && 0.5f*periodicBoxSize.z-blockSizeX.z >= MAX_CUTOFF); #else y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } // Skip over tiles that have exclusions, since they were already processed. while (skipTiles[tbx+TILE_SIZE-1] < pos) { if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) { int2 tile = exclusionTiles[skipBase+tgx]; skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2; } else skipTiles[threadIdx.x] = end; skipBase += TILE_SIZE; currentSkipIndex = tbx; } while (skipTiles[currentSkipIndex] < pos) currentSkipIndex++; includeTile = (skipTiles[currentSkipIndex] != pos); #endif if (includeTile) { unsigned int atom1 = x*TILE_SIZE + tgx; // Load atom data for this tile. real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_NEIGHBOR_LIST unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx]; #else unsigned int j = y*TILE_SIZE + tgx; #endif atomIndices[threadIdx.x] = j; DECLARE_LOCAL_PARAMETERS real4 shflPosq; real3 shflForce; shflForce.x = 0.0f; shflForce.y = 0.0f; shflForce.z = 0.0f; if (j < PADDED_NUM_ATOMS) { // Load position of atom j from from global memory shflPosq = posq[j]; LOAD_LOCAL_PARAMETERS_FROM_GLOBAL } else { shflPosq = make_real4(0, 0, 0, 0); CLEAR_LOCAL_PARAMETERS } #ifdef USE_PERIODIC if (singlePeriodicCopy) { // The box is small enough that we can just translate all the atoms into a single periodic // box, then skip having to apply periodic boundary conditions later. 
real4 blockCenterX = blockCenter[x]; APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX) APPLY_PERIODIC_TO_POS_WITH_CENTER(shflPosq, blockCenterX) unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = shflPosq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS); #endif real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; shflForce.x += delta.x; shflForce.y += delta.y; shflForce.z += delta.z; #else // !USE_SYMMETRIC force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; shflForce.x += dEdR2.x; shflForce.y += dEdR2.y; shflForce.z += dEdR2.z; #endif // end USE_SYMMETRIC #endif SHUFFLE_WARP_DATA tj = (tj + 1) & (TILE_SIZE - 1); } } else #endif { // We need to apply periodic boundary conditions separately for each interaction. unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = shflPosq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS); #endif real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; shflForce.x += delta.x; shflForce.y += delta.y; shflForce.z += delta.z; #else // !USE_SYMMETRIC force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; shflForce.x += dEdR2.x; shflForce.y += dEdR2.y; shflForce.z += dEdR2.z; #endif // end USE_SYMMETRIC #endif SHUFFLE_WARP_DATA tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. #ifdef INCLUDE_FORCES atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>(realToFixedPoint(force.x))); atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.y))); atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.z))); #ifdef USE_NEIGHBOR_LIST unsigned int atom2 = atomIndices[threadIdx.x]; #else unsigned int atom2 = y*TILE_SIZE + tgx; #endif if (atom2 < PADDED_NUM_ATOMS) { atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>(realToFixedPoint(shflForce.x))); atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.y))); atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.z))); } #endif } pos++; } // Third loop: single pairs that aren't part of a tile. #if USE_NEIGHBOR_LIST const unsigned int numPairs = interactionCount[1]; if (numPairs > maxSinglePairs) return; // There wasn't enough memory for the neighbor list. 
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numPairs; i += blockDim.x*gridDim.x) { int2 pair = singlePairs[i]; int atom1 = pair.x; int atom2 = pair.y; real4 posq1 = posq[atom1]; real4 posq2 = posq[atom2]; LOAD_ATOM1_PARAMETERS LOAD_ATOM2_PARAMETERS_FROM_GLOBAL real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif bool hasExclusions = false; bool isExcluded = false; real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC real3 dEdR1 = delta*dEdR; real3 dEdR2 = -dEdR1; #endif saveSingleForce(atom1, -dEdR1, forceBuffers); saveSingleForce(atom2, -dEdR2, forceBuffers); #endif } #endif #ifdef INCLUDE_ENERGY energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy; #endif SAVE_DERIVATIVES }
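The kernel above relies on the cyclic lane rotation tj = (tj + 1) & (TILE_SIZE - 1) so that, within a warp, every lane touches a different atom2 slot on each step and the shuffled accumulators never collide. The minimal host-side sketch below (compilable on its own, not part of the OpenMM source) only illustrates that property; TILE_SIZE = 32 is an assumption here, since the real value comes from a compile-time define supplied by the host code.

// Illustrative sketch only: demonstrates that the rotation (lane + step) & (TILE_SIZE - 1)
// assigns 32 distinct partners to the 32 lanes at every step. TILE_SIZE = 32 is assumed.
#include <cstdio>
#include <set>

int main() {
    const int TILE_SIZE = 32;                              // assumption: warp-sized tiles
    for (int step = 0; step < TILE_SIZE; ++step) {
        std::set<int> partners;                            // atom2 slots touched this step
        for (int lane = 0; lane < TILE_SIZE; ++lane) {
            int tj = (lane + step) & (TILE_SIZE - 1);      // same rotation the kernel applies
            partners.insert(tj);
        }
        // Each step, the 32 lanes hit 32 distinct slots, so no two lanes accumulate
        // into the same shuffled force register.
        std::printf("step %2d: %zu distinct partners\n", step, partners.size());
    }
    return 0;
}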
637b0fa8d9107bfe966ee73aaadaf7891bff8d8a.cu
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE) //support for 64 bit shuffles static __inline__ __device__ float real_shfl(float var, int srcLane) { return SHFL(var, srcLane); } static __inline__ __device__ float real_shfl(int var, int srcLane) { return SHFL(var, srcLane); } static __inline__ __device__ double real_shfl(double var, int srcLane) { int hi, lo; asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); hi = SHFL(hi, srcLane); lo = SHFL(lo, srcLane); return __hiloint2double( hi, lo ); } static __inline__ __device__ long long real_shfl(long long var, int srcLane) { int hi, lo; asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); hi = SHFL(hi, srcLane); lo = SHFL(lo, srcLane); // unforunately there isn't an __nv_hiloint2long(hi,lo) intrinsic cast int2 fuse; fuse.x = lo; fuse.y = hi; return *reinterpret_cast<long long*>(&fuse); } /** * Save the force on a single atom. */ __device__ void saveSingleForce(int atom, real3 force, unsigned long long* forceBuffers) { if (force.x != 0) atomicAdd(&forceBuffers[atom], static_cast<unsigned long long>(realToFixedPoint(force.x))); if (force.y != 0) atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.y))); if (force.z != 0) atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.z))); } /** * Compute nonbonded interactions. The kernel is separated into two parts, * tiles with exclusions and tiles without exclusions. It relies heavily on * implicit warp-level synchronization. A tile is defined by two atom blocks * each of warpsize. Each warp computes a range of tiles. * * Tiles with exclusions compute the entire set of interactions across * atom blocks, equal to warpsize*warpsize. In order to avoid access conflicts * the forces are computed and accumulated diagonally in the manner shown below * where, suppose * * [a-h] comprise atom block 1, [i-p] comprise atom block 2 * * 1 denotes the first set of calculations within the warp * 2 denotes the second set of calculations within the warp * ... etc. * * threads * 0 1 2 3 4 5 6 7 * atom1 * L a b c d e f g h * o i 1 2 3 4 5 6 7 8 * c j 8 1 2 3 4 5 6 7 * a k 7 8 1 2 3 4 5 6 * l l 6 7 8 1 2 3 4 5 * D m 5 6 7 8 1 2 3 4 * a n 4 5 6 7 8 1 2 3 * t o 3 4 5 6 7 8 1 2 * a p 2 3 4 5 6 7 8 1 * * Tiles without exclusions read off directly from the neighbourlist interactingAtoms * and follows the same force accumulation method. If more there are more interactingTiles * than the size of the neighbourlist initially allocated, the neighbourlist is rebuilt * and the full tileset is computed. This should happen on the first step, and very rarely * afterwards. * * On CUDA devices that support the shuffle intrinsic, on diagonal exclusion tiles use * __shfl to broadcast. For all other types of tiles __shfl is used to pass around the * forces, positions, and parameters when computing the forces. 
* * [out]forceBuffers - forces on each atom to eventually be accumulated * [out]energyBuffer - energyBuffer to eventually be accumulated * [in]posq - x,y,z,charge * [in]exclusions - 1024-bit flags denoting atom-atom exclusions for each tile * [in]exclusionTiles - x,y denotes the indices of tiles that have an exclusion * [in]startTileIndex - index into first tile to be processed * [in]numTileIndices - number of tiles this context is responsible for processing * [in]int tiles - the atom block for each tile * [in]interactionCount - total number of tiles that have an interaction * [in]maxTiles - stores the size of the neighbourlist in case it needs * - to be expanded * [in]periodicBoxSize - size of the Periodic Box, last dimension (w) not used * [in]invPeriodicBox - inverse of the periodicBoxSize, pre-computed for speed * [in]blockCenter - the center of each block in euclidean coordinates * [in]blockSize - size of the each block, radiating from the center * - x is half the distance of total length * - y is half the distance of total width * - z is half the distance of total height * - w is not used * [in]interactingAtoms - a list of interactions within a given tile * */ extern "C" __global__ void computeNonbonded( unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const tileflags* __restrict__ exclusions, const int2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned long long numTileIndices #ifdef USE_CUTOFF , const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter, const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms, unsigned int maxSinglePairs, const int2* __restrict__ singlePairs #endif PARAMETER_ARGUMENTS) { const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE; const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp const unsigned int tbx = threadIdx.x - tgx; // block warpIndex mixed energy = 0; INIT_DERIVATIVES // First loop: process tiles that contain exclusions. const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps; for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) { const int2 tileIndices = exclusionTiles[pos]; const unsigned int x = tileIndices.x; const unsigned int y = tileIndices.y; real3 force = make_real3(0); unsigned int atom1 = x*TILE_SIZE + tgx; real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_EXCLUSIONS tileflags excl = exclusions[pos*TILE_SIZE+tgx]; #endif const bool hasExclusions = true; if (x == y) { // This tile is on the diagonal. 
real4 shflPosq = posq1; // we do not need to fetch parameters from global since this is a symmetric tile // instead we can broadcast the values using shuffle for (unsigned int j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+j; real4 posq2; BROADCAST_WARP_DATA real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+j; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1)); #endif real tempEnergy = 0.0f; const real interactionScale = 0.5f; COMPUTE_INTERACTION energy += 0.5f*tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC force.x -= delta.x*dEdR; force.y -= delta.y*dEdR; force.z -= delta.z*dEdR; #else force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; #endif #endif #ifdef USE_EXCLUSIONS excl >>= 1; #endif } } else { // This is an off-diagonal tile. unsigned int j = y*TILE_SIZE + tgx; real4 shflPosq = posq[j]; real3 shflForce; shflForce.x = 0.0f; shflForce.y = 0.0f; shflForce.z = 0.0f; DECLARE_LOCAL_PARAMETERS LOAD_LOCAL_PARAMETERS_FROM_GLOBAL #ifdef USE_EXCLUSIONS excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx)); #endif unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = shflPosq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = y*TILE_SIZE+tj; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1)); #endif real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; shflForce.x += delta.x; shflForce.y += delta.y; shflForce.z += delta.z; #else // !USE_SYMMETRIC force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; shflForce.x += dEdR2.x; shflForce.y += dEdR2.y; shflForce.z += dEdR2.z; #endif // end USE_SYMMETRIC #endif SHUFFLE_WARP_DATA #ifdef USE_EXCLUSIONS excl >>= 1; #endif // cycles the indices // 0 1 2 3 4 5 6 7 -> 1 2 3 4 5 6 7 0 tj = (tj + 1) & (TILE_SIZE - 1); } const unsigned int offset = y*TILE_SIZE + tgx; // write results for off diagonal tiles #ifdef INCLUDE_FORCES atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>(realToFixedPoint(shflForce.x))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.y))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.z))); #endif } // Write results for on and off diagonal tiles #ifdef INCLUDE_FORCES const unsigned int offset = x*TILE_SIZE + tgx; atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>(realToFixedPoint(force.x))); atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.y))); atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.z))); 
#endif } // Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all // of them (no cutoff). #ifdef USE_NEIGHBOR_LIST const unsigned int numTiles = interactionCount[0]; if (numTiles > maxTiles) return; // There wasn't enough memory for the neighbor list. int pos = (int) (warp*(long long)numTiles/totalWarps); int end = (int) ((warp+1)*(long long)numTiles/totalWarps); #else int pos = (int) (startTileIndex+warp*numTileIndices/totalWarps); int end = (int) (startTileIndex+(warp+1)*numTileIndices/totalWarps); int skipBase = 0; int currentSkipIndex = tbx; __shared__ volatile int skipTiles[THREAD_BLOCK_SIZE]; skipTiles[threadIdx.x] = -1; #endif // atomIndices can probably be shuffled as well // but it probably wouldn't make things any faster __shared__ int atomIndices[THREAD_BLOCK_SIZE]; while (pos < end) { const bool hasExclusions = false; real3 force = make_real3(0); bool includeTile = true; // Extract the coordinates of this tile. int x, y; bool singlePeriodicCopy = false; #ifdef USE_NEIGHBOR_LIST x = tiles[pos]; real4 blockSizeX = blockSize[x]; singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= MAX_CUTOFF && 0.5f*periodicBoxSize.y-blockSizeX.y >= MAX_CUTOFF && 0.5f*periodicBoxSize.z-blockSizeX.z >= MAX_CUTOFF); #else y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos)); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error. y += (x < y ? -1 : 1); x = (pos-y*NUM_BLOCKS+y*(y+1)/2); } // Skip over tiles that have exclusions, since they were already processed. while (skipTiles[tbx+TILE_SIZE-1] < pos) { if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) { int2 tile = exclusionTiles[skipBase+tgx]; skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2; } else skipTiles[threadIdx.x] = end; skipBase += TILE_SIZE; currentSkipIndex = tbx; } while (skipTiles[currentSkipIndex] < pos) currentSkipIndex++; includeTile = (skipTiles[currentSkipIndex] != pos); #endif if (includeTile) { unsigned int atom1 = x*TILE_SIZE + tgx; // Load atom data for this tile. real4 posq1 = posq[atom1]; LOAD_ATOM1_PARAMETERS #ifdef USE_NEIGHBOR_LIST unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx]; #else unsigned int j = y*TILE_SIZE + tgx; #endif atomIndices[threadIdx.x] = j; DECLARE_LOCAL_PARAMETERS real4 shflPosq; real3 shflForce; shflForce.x = 0.0f; shflForce.y = 0.0f; shflForce.z = 0.0f; if (j < PADDED_NUM_ATOMS) { // Load position of atom j from from global memory shflPosq = posq[j]; LOAD_LOCAL_PARAMETERS_FROM_GLOBAL } else { shflPosq = make_real4(0, 0, 0, 0); CLEAR_LOCAL_PARAMETERS } #ifdef USE_PERIODIC if (singlePeriodicCopy) { // The box is small enough that we can just translate all the atoms into a single periodic // box, then skip having to apply periodic boundary conditions later. 
real4 blockCenterX = blockCenter[x]; APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX) APPLY_PERIODIC_TO_POS_WITH_CENTER(shflPosq, blockCenterX) unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = shflPosq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS); #endif real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; shflForce.x += delta.x; shflForce.y += delta.y; shflForce.z += delta.z; #else // !USE_SYMMETRIC force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; shflForce.x += dEdR2.x; shflForce.y += dEdR2.y; shflForce.z += dEdR2.z; #endif // end USE_SYMMETRIC #endif SHUFFLE_WARP_DATA tj = (tj + 1) & (TILE_SIZE - 1); } } else #endif { // We need to apply periodic boundary conditions separately for each interaction. unsigned int tj = tgx; for (j = 0; j < TILE_SIZE; j++) { int atom2 = tbx+tj; real4 posq2 = shflPosq; real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; LOAD_ATOM2_PARAMETERS atom2 = atomIndices[tbx+tj]; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif #ifdef USE_EXCLUSIONS bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS); #endif real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC delta *= dEdR; force.x -= delta.x; force.y -= delta.y; force.z -= delta.z; shflForce.x += delta.x; shflForce.y += delta.y; shflForce.z += delta.z; #else // !USE_SYMMETRIC force.x -= dEdR1.x; force.y -= dEdR1.y; force.z -= dEdR1.z; shflForce.x += dEdR2.x; shflForce.y += dEdR2.y; shflForce.z += dEdR2.z; #endif // end USE_SYMMETRIC #endif SHUFFLE_WARP_DATA tj = (tj + 1) & (TILE_SIZE - 1); } } // Write results. #ifdef INCLUDE_FORCES atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>(realToFixedPoint(force.x))); atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.y))); atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(force.z))); #ifdef USE_NEIGHBOR_LIST unsigned int atom2 = atomIndices[threadIdx.x]; #else unsigned int atom2 = y*TILE_SIZE + tgx; #endif if (atom2 < PADDED_NUM_ATOMS) { atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>(realToFixedPoint(shflForce.x))); atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.y))); atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>(realToFixedPoint(shflForce.z))); } #endif } pos++; } // Third loop: single pairs that aren't part of a tile. #if USE_NEIGHBOR_LIST const unsigned int numPairs = interactionCount[1]; if (numPairs > maxSinglePairs) return; // There wasn't enough memory for the neighbor list. 
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numPairs; i += blockDim.x*gridDim.x) { int2 pair = singlePairs[i]; int atom1 = pair.x; int atom2 = pair.y; real4 posq1 = posq[atom1]; real4 posq2 = posq[atom2]; LOAD_ATOM1_PARAMETERS LOAD_ATOM2_PARAMETERS_FROM_GLOBAL real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z); #ifdef USE_PERIODIC APPLY_PERIODIC_TO_DELTA(delta) #endif real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z; real invR = RSQRT(r2); real r = r2*invR; #ifdef USE_SYMMETRIC real dEdR = 0.0f; #else real3 dEdR1 = make_real3(0); real3 dEdR2 = make_real3(0); #endif bool hasExclusions = false; bool isExcluded = false; real tempEnergy = 0.0f; const real interactionScale = 1.0f; COMPUTE_INTERACTION energy += tempEnergy; #ifdef INCLUDE_FORCES #ifdef USE_SYMMETRIC real3 dEdR1 = delta*dEdR; real3 dEdR2 = -dEdR1; #endif saveSingleForce(atom1, -dEdR1, forceBuffers); saveSingleForce(atom2, -dEdR2, forceBuffers); #endif } #endif #ifdef INCLUDE_ENERGY energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy; #endif SAVE_DERIVATIVES }
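When no neighbor list is available, the kernel decodes the flat tile index pos into block coordinates with y = floor(NUM_BLOCKS + 0.5 - sqrt((NUM_BLOCKS + 0.5)^2 - 2*pos)) and then nudges y by one when round-off puts x out of range. The standalone check below is illustrative only (NUM_BLOCKS = 7 is an arbitrary assumption): it re-encodes each decoded pair as pos = x + y*NUM_BLOCKS - y*(y+1)/2, which is the same triangular numbering the skip-tile logic uses, and confirms the round trip.

// Illustrative round-trip check of the triangular tile index decode used above.
#include <cstdio>
#include <cmath>

int main() {
    const int NUM_BLOCKS = 7;                              // assumption: small value for illustration
    const long long numTiles = (long long)NUM_BLOCKS*(NUM_BLOCKS+1)/2;
    int bad = 0;
    for (long long pos = 0; pos < numTiles; ++pos) {
        // Same decode the kernel performs when enumerating all tiles.
        int y = (int)std::floor(NUM_BLOCKS+0.5f-std::sqrt((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
        int x = (int)(pos-y*NUM_BLOCKS+y*(y+1)/2);
        if (x < y || x >= NUM_BLOCKS) {                    // round-off correction, as in the kernel
            y += (x < y ? -1 : 1);
            x = (int)(pos-y*NUM_BLOCKS+y*(y+1)/2);
        }
        // Re-encode and compare: pos = x + y*NUM_BLOCKS - y*(y+1)/2 with y <= x < NUM_BLOCKS.
        long long back = x + (long long)y*NUM_BLOCKS - (long long)y*(y+1)/2;
        if (back != pos || y > x) ++bad;
    }
    std::printf("%d mismatches out of %lld tiles\n", bad, numTiles);
    return 0;
}

If this check ever reported mismatches, the diagonal-inclusive triangular numbering assumed by the exclusion-skipping code would be violated.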
8147c847d85a9d77ab343ac3cb92e74aefcc809f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <stdbool.h> #define TRUE 1 #define FALSE 0 #include <unistd.h> #include <stdint.h> // #define BENCH_PRINT /*----------- using cycle counter ------------*/ __inline__ uint64_t rdtsc() { uint32_t lo, hi; /* We cannot use "=A", since this would use %rax on x86_64 */ __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi)); return (uint64_t)hi << 32 | lo; } unsigned long long start_cycles; #define startCycle() (start_cycles = rdtsc()) #define stopCycle(cycles) (cycles = rdtsc()-start_cycles) /*--------- using gettimeofday ------------*/ #include <sys/time.h> struct timeval starttime; struct timeval endtime; #define startTime() \ { \ gettimeofday(&starttime, 0); \ } #define stopTime(valusecs) \ { \ gettimeofday(&endtime, 0); \ valusecs = (endtime.tv_sec - starttime.tv_sec) * 1000000 + endtime.tv_usec - starttime.tv_usec; \ } int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; void BFSGraph(int argc, char** argv); void Usage(int argc, char**argv){ fprintf(stderr, "Usage: %s <input_file> \n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { no_of_nodes = 0; edge_list_size = 0; BFSGraph(argc, argv); } void bfsSeq( struct Node* h_graph_nodes, bool *h_graph_mask, bool *h_updating_graph_mask, bool *h_graph_visited, int *h_graph_edges, int *h_cost ) { bool stop; do { //if no thread changes this value then the loop stops stop = false; for (int tid = 0; tid < no_of_nodes; tid++) { if (h_graph_mask[tid] == true){ h_graph_mask[tid] = false; for (int i = h_graph_nodes[tid].starting; i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++) { int id = h_graph_edges[i]; if (!h_graph_visited[id]) { h_cost[id] = h_cost[tid] + 1; h_updating_graph_mask[id] = true; } } } } for (int tid = 0; tid< no_of_nodes; tid++) { if (h_updating_graph_mask[tid] == true){ h_graph_mask[tid] = true; h_graph_visited[tid] = true; stop = true; h_updating_graph_mask[tid] = false; } } } while (stop); } __global__ void bfsIteration( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < no_of_nodes){ if (d_graph_mask[tid] == true){ d_graph_mask[tid] = false; for (int i = d_graph_nodes[tid].starting; i<(d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_graph_edges[i]; if (!d_graph_visited[id]) { d_cost[id] = d_cost[tid] + 1; d_updating_graph_mask[id] = true; } } } } } #ifdef _SM_35_ __global__ void bfsCheck( bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int no_of_nodes ) { __shared__ extern bool stop; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < no_of_nodes){ if (d_updating_graph_mask[tid] == true){ d_graph_mask[tid] = true; d_graph_visited[tid] = true; stop = true; d_updating_graph_mask[tid] = false; } } } __global__ void bfsStart( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { __shared__ bool stop; int numBlocks = 
ceil((float)(no_of_nodes) / 256.0); int numThreadsPerBlock = 256; do{ stop = false; bfsIteration << <numBlocks, numThreadsPerBlock >> >(d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); bfsCheck << <numBlocks, numThreadsPerBlock >> >(d_graph_mask, d_updating_graph_mask, d_graph_visited, no_of_nodes); } while (stop); } void bfsCuda( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { hipLaunchKernelGGL(( bfsStart), dim3(1), dim3(1), 0, 0, d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); } #else __global__ void bfsCheck( bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int no_of_nodes, bool *stop ) { *stop = false; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < no_of_nodes){ if (d_updating_graph_mask[tid] == true){ d_graph_mask[tid] = true; d_graph_visited[tid] = true; *stop = true; d_updating_graph_mask[tid] = false; } } } void bfsCuda( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { int numBlocks = ceil((float)(no_of_nodes) / 256.0); int numThreadsPerBlock = 256; bool *d_stop; bool h_stop; hipMalloc(&d_stop, sizeof(bool)); do{ hipLaunchKernelGGL(( bfsIteration) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); hipDeviceSynchronize(); hipLaunchKernelGGL(( bfsCheck) , dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, no_of_nodes, d_stop); hipDeviceSynchronize(); hipMemcpy(&h_stop, d_stop, sizeof(bool), hipMemcpyDeviceToHost); } while (h_stop); hipFree(d_stop); } #endif //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// void BFSGraph(int argc, char** argv) { char *input_f; int num_omp_threads; if (argc != 2){ Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f, "r"); if (!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp, "%d", &no_of_nodes); // allocate host memory struct Node* h_graph_nodes = (struct Node*) malloc(sizeof(struct Node)*no_of_nodes); bool *h_graph_mask = (bool*)malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*)malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*)malloc(sizeof(bool)*no_of_nodes); // allocate device memory struct Node* d_graph_nodes; bool *d_graph_mask; bool *d_updating_graph_mask; bool *d_graph_visited; hipMalloc((void**)&d_graph_nodes, sizeof(struct Node)*no_of_nodes); hipMalloc((void**)&d_graph_mask, sizeof(bool)*no_of_nodes); hipMalloc((void**)&d_updating_graph_mask, sizeof(bool)*no_of_nodes); hipMalloc((void**)&d_graph_visited, sizeof(bool)*no_of_nodes); int start, edgeno; // initalize the memory for (unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp, "%d %d", &start, &edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; h_graph_mask[i] = false; h_updating_graph_mask[i] = false; h_graph_visited[i] = false; } //read the source node from the file fscanf(fp, "%d", &source); source = 0; //set the source node as true in the mask h_graph_mask[source] = 
true; h_graph_visited[source] = true; fscanf(fp, "%d", &edge_list_size); int id, cost; int* h_graph_edges = (int*)malloc(sizeof(int)*edge_list_size); for (int i = 0; i < edge_list_size; i++) { fscanf(fp, "%d", &id); fscanf(fp, "%d", &cost); h_graph_edges[i] = id; } if (fp) fclose(fp); // Initialize device memory int* d_graph_edges; hipMalloc((void**)&d_graph_edges, sizeof(int)*edge_list_size); hipMemcpy(d_graph_nodes, h_graph_nodes, sizeof(struct Node)*no_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice); // allocate mem for the result on host side int* h_cost = (int*)malloc(sizeof(int)*no_of_nodes); for (int i = 0; i<no_of_nodes; i++) h_cost[i] = -1; h_cost[source] = 0; // allocate memory for the result on the device side int* h_cost_cuda = (int*)malloc(sizeof(int)*no_of_nodes); int* d_cost; hipMalloc((void**)&d_cost, sizeof(int)*no_of_nodes); hipMemcpy(d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice); double elapsed_time_host, elapsed_time_device; printf("Start sequential traversal of the tree\n"); startTime(); bfsSeq(h_graph_nodes, h_graph_mask, h_updating_graph_mask, h_graph_visited, h_graph_edges, h_cost); stopTime(elapsed_time_host); printf("Start parallel traversal of the tree\n"); startTime(); bfsCuda(d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); stopTime(elapsed_time_device); hipMemcpy(h_cost_cuda, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost); printf("Print results\n"); //Store the result into a file FILE *h_fpo = fopen("h_result.txt", "w"); FILE *d_fpo = fopen("d_result.txt", "w"); int test = TRUE; for (int i = 0; i < no_of_nodes; i++){ fprintf(h_fpo, "%d) cost:%d\n", i, h_cost[i]); fprintf(d_fpo, "%d) cost:%d\n", i, h_cost_cuda[i]); if (h_cost[i] != h_cost_cuda[i]){ test = FALSE; } } fclose(h_fpo); fclose(d_fpo); printf("Results stored in h_result.txt and d_result.txt\n"); if (test){ printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } printf("Seq: %.0f us\tPar: %.0f us\n", elapsed_time_host, elapsed_time_device); // cleanup memory free(h_graph_nodes); free(h_graph_edges); free(h_graph_mask); free(h_updating_graph_mask); free(h_graph_visited); free(h_cost); free(h_cost_cuda); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_graph_mask); hipFree(d_updating_graph_mask); hipFree(d_graph_visited); hipFree(d_cost); printf("Press any key...\n"); getchar(); }
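Both the sequential and GPU paths above implement the same level-synchronous scheme: graph_mask marks the current frontier, updating_graph_mask collects the next one, and a second pass commits it. The hard-coded example below is a host-only sketch of that two-phase update on a six-node graph; the adjacency layout mirrors struct Node {starting, no_of_edges}, but the graph and values are invented for illustration and are not the benchmark's input format.

// Illustrative two-phase frontier BFS on a tiny hard-coded graph.
#include <cstdio>
#include <vector>

int main() {
    // CSR-like layout matching Node{starting, no_of_edges}; edges (directed): 0->1, 0->2, 1->3, 2->3, 3->4, 4->5.
    int starting[]    = {0, 2, 3, 4, 5, 6};
    int no_of_edges[] = {2, 1, 1, 1, 1, 0};
    int edges[]       = {1, 2, 3, 3, 4, 5};
    const int n = 6, source = 0;

    std::vector<bool> mask(n, false), updating(n, false), visited(n, false);
    std::vector<int> cost(n, -1);
    mask[source] = visited[source] = true;
    cost[source] = 0;

    bool stop;
    do {
        stop = false;
        // Phase 1: expand the current frontier (bfsIteration / first loop of bfsSeq).
        for (int tid = 0; tid < n; ++tid) {
            if (!mask[tid]) continue;
            mask[tid] = false;
            for (int i = starting[tid]; i < starting[tid] + no_of_edges[tid]; ++i) {
                int id = edges[i];
                if (!visited[id]) { cost[id] = cost[tid] + 1; updating[id] = true; }
            }
        }
        // Phase 2: commit the next frontier (bfsCheck / second loop of bfsSeq).
        for (int tid = 0; tid < n; ++tid) {
            if (updating[tid]) { mask[tid] = true; visited[tid] = true; updating[tid] = false; stop = true; }
        }
    } while (stop);

    for (int i = 0; i < n; ++i) std::printf("%d) cost:%d\n", i, cost[i]);
    return 0;
}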
8147c847d85a9d77ab343ac3cb92e74aefcc809f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <stdbool.h> #define TRUE 1 #define FALSE 0 #include <unistd.h> #include <stdint.h> // #define BENCH_PRINT /*----------- using cycle counter ------------*/ __inline__ uint64_t rdtsc() { uint32_t lo, hi; /* We cannot use "=A", since this would use %rax on x86_64 */ __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi)); return (uint64_t)hi << 32 | lo; } unsigned long long start_cycles; #define startCycle() (start_cycles = rdtsc()) #define stopCycle(cycles) (cycles = rdtsc()-start_cycles) /*--------- using gettimeofday ------------*/ #include <sys/time.h> struct timeval starttime; struct timeval endtime; #define startTime() \ { \ gettimeofday(&starttime, 0); \ } #define stopTime(valusecs) \ { \ gettimeofday(&endtime, 0); \ valusecs = (endtime.tv_sec - starttime.tv_sec) * 1000000 + endtime.tv_usec - starttime.tv_usec; \ } int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; void BFSGraph(int argc, char** argv); void Usage(int argc, char**argv){ fprintf(stderr, "Usage: %s <input_file> \n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { no_of_nodes = 0; edge_list_size = 0; BFSGraph(argc, argv); } void bfsSeq( struct Node* h_graph_nodes, bool *h_graph_mask, bool *h_updating_graph_mask, bool *h_graph_visited, int *h_graph_edges, int *h_cost ) { bool stop; do { //if no thread changes this value then the loop stops stop = false; for (int tid = 0; tid < no_of_nodes; tid++) { if (h_graph_mask[tid] == true){ h_graph_mask[tid] = false; for (int i = h_graph_nodes[tid].starting; i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++) { int id = h_graph_edges[i]; if (!h_graph_visited[id]) { h_cost[id] = h_cost[tid] + 1; h_updating_graph_mask[id] = true; } } } } for (int tid = 0; tid< no_of_nodes; tid++) { if (h_updating_graph_mask[tid] == true){ h_graph_mask[tid] = true; h_graph_visited[tid] = true; stop = true; h_updating_graph_mask[tid] = false; } } } while (stop); } __global__ void bfsIteration( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < no_of_nodes){ if (d_graph_mask[tid] == true){ d_graph_mask[tid] = false; for (int i = d_graph_nodes[tid].starting; i<(d_graph_nodes[tid].no_of_edges + d_graph_nodes[tid].starting); i++) { int id = d_graph_edges[i]; if (!d_graph_visited[id]) { d_cost[id] = d_cost[tid] + 1; d_updating_graph_mask[id] = true; } } } } } #ifdef _SM_35_ __global__ void bfsCheck( bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int no_of_nodes ) { __shared__ extern bool stop; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < no_of_nodes){ if (d_updating_graph_mask[tid] == true){ d_graph_mask[tid] = true; d_graph_visited[tid] = true; stop = true; d_updating_graph_mask[tid] = false; } } } __global__ void bfsStart( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { __shared__ bool stop; int numBlocks = ceil((float)(no_of_nodes) / 256.0); int numThreadsPerBlock = 256; do{ stop = false; 
bfsIteration << <numBlocks, numThreadsPerBlock >> >(d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); bfsCheck << <numBlocks, numThreadsPerBlock >> >(d_graph_mask, d_updating_graph_mask, d_graph_visited, no_of_nodes); } while (stop); } void bfsCuda( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { bfsStart<<<1, 1>>>(d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); } #else __global__ void bfsCheck( bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int no_of_nodes, bool *stop ) { *stop = false; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < no_of_nodes){ if (d_updating_graph_mask[tid] == true){ d_graph_mask[tid] = true; d_graph_visited[tid] = true; *stop = true; d_updating_graph_mask[tid] = false; } } } void bfsCuda( struct Node *d_graph_nodes, bool *d_graph_mask, bool *d_updating_graph_mask, bool *d_graph_visited, int *d_graph_edges, int *d_cost, int no_of_nodes ) { int numBlocks = ceil((float)(no_of_nodes) / 256.0); int numThreadsPerBlock = 256; bool *d_stop; bool h_stop; cudaMalloc(&d_stop, sizeof(bool)); do{ bfsIteration <<<numBlocks, numThreadsPerBlock >>>(d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); cudaDeviceSynchronize(); bfsCheck <<<numBlocks, numThreadsPerBlock>>>(d_graph_mask, d_updating_graph_mask, d_graph_visited, no_of_nodes, d_stop); cudaDeviceSynchronize(); cudaMemcpy(&h_stop, d_stop, sizeof(bool), cudaMemcpyDeviceToHost); } while (h_stop); cudaFree(d_stop); } #endif //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// void BFSGraph(int argc, char** argv) { char *input_f; int num_omp_threads; if (argc != 2){ Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f, "r"); if (!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp, "%d", &no_of_nodes); // allocate host memory struct Node* h_graph_nodes = (struct Node*) malloc(sizeof(struct Node)*no_of_nodes); bool *h_graph_mask = (bool*)malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*)malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*)malloc(sizeof(bool)*no_of_nodes); // allocate device memory struct Node* d_graph_nodes; bool *d_graph_mask; bool *d_updating_graph_mask; bool *d_graph_visited; cudaMalloc((void**)&d_graph_nodes, sizeof(struct Node)*no_of_nodes); cudaMalloc((void**)&d_graph_mask, sizeof(bool)*no_of_nodes); cudaMalloc((void**)&d_updating_graph_mask, sizeof(bool)*no_of_nodes); cudaMalloc((void**)&d_graph_visited, sizeof(bool)*no_of_nodes); int start, edgeno; // initalize the memory for (unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp, "%d %d", &start, &edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; h_graph_mask[i] = false; h_updating_graph_mask[i] = false; h_graph_visited[i] = false; } //read the source node from the file fscanf(fp, "%d", &source); source = 0; //set the source node as true in the mask h_graph_mask[source] = true; h_graph_visited[source] = true; fscanf(fp, "%d", &edge_list_size); int id, cost; int* h_graph_edges = (int*)malloc(sizeof(int)*edge_list_size); for (int i = 0; i < 
edge_list_size; i++) { fscanf(fp, "%d", &id); fscanf(fp, "%d", &cost); h_graph_edges[i] = id; } if (fp) fclose(fp); // Initialize device memory int* d_graph_edges; cudaMalloc((void**)&d_graph_edges, sizeof(int)*edge_list_size); cudaMemcpy(d_graph_nodes, h_graph_nodes, sizeof(struct Node)*no_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice); // allocate mem for the result on host side int* h_cost = (int*)malloc(sizeof(int)*no_of_nodes); for (int i = 0; i<no_of_nodes; i++) h_cost[i] = -1; h_cost[source] = 0; // allocate memory for the result on the device side int* h_cost_cuda = (int*)malloc(sizeof(int)*no_of_nodes); int* d_cost; cudaMalloc((void**)&d_cost, sizeof(int)*no_of_nodes); cudaMemcpy(d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice); double elapsed_time_host, elapsed_time_device; printf("Start sequential traversal of the tree\n"); startTime(); bfsSeq(h_graph_nodes, h_graph_mask, h_updating_graph_mask, h_graph_visited, h_graph_edges, h_cost); stopTime(elapsed_time_host); printf("Start parallel traversal of the tree\n"); startTime(); bfsCuda(d_graph_nodes, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_graph_edges, d_cost, no_of_nodes); stopTime(elapsed_time_device); cudaMemcpy(h_cost_cuda, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost); printf("Print results\n"); //Store the result into a file FILE *h_fpo = fopen("h_result.txt", "w"); FILE *d_fpo = fopen("d_result.txt", "w"); int test = TRUE; for (int i = 0; i < no_of_nodes; i++){ fprintf(h_fpo, "%d) cost:%d\n", i, h_cost[i]); fprintf(d_fpo, "%d) cost:%d\n", i, h_cost_cuda[i]); if (h_cost[i] != h_cost_cuda[i]){ test = FALSE; } } fclose(h_fpo); fclose(d_fpo); printf("Results stored in h_result.txt and d_result.txt\n"); if (test){ printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } printf("Seq: %.0f us\tPar: %.0f us\n", elapsed_time_host, elapsed_time_device); // cleanup memory free(h_graph_nodes); free(h_graph_edges); free(h_graph_mask); free(h_updating_graph_mask); free(h_graph_visited); free(h_cost); free(h_cost_cuda); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_graph_mask); cudaFree(d_updating_graph_mask); cudaFree(d_graph_visited); cudaFree(d_cost); printf("Press any key...\n"); getchar(); }
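The non-dynamic-parallelism bfsCuda above iterates from the host by reading back a device-side stop flag after every round. The standalone sketch below distills that handshake with basic error checking (which the benchmark omits); the kernel, the CHECK macro, and the counter are invented for illustration, and the flag is cleared from the host rather than inside the kernel as bfsCheck does.

// Illustrative host/device stop-flag handshake, not the benchmark's code.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call) do { cudaError_t e = (call); if (e != cudaSuccess) { \
    std::printf("CUDA error %s at %s:%d\n", cudaGetErrorString(e), __FILE__, __LINE__); return 1; } } while (0)

// Toy kernel: raises the flag as long as work remains.
__global__ void stepKernel(int* counter, bool* stop) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        if (*counter > 0) { --(*counter); *stop = true; }
    }
}

int main() {
    int* d_counter; bool* d_stop;
    int h_counter = 5; bool h_stop;
    CHECK(cudaMalloc(&d_counter, sizeof(int)));
    CHECK(cudaMalloc(&d_stop, sizeof(bool)));
    CHECK(cudaMemcpy(d_counter, &h_counter, sizeof(int), cudaMemcpyHostToDevice));
    int rounds = 0;
    do {
        h_stop = false;
        CHECK(cudaMemcpy(d_stop, &h_stop, sizeof(bool), cudaMemcpyHostToDevice));  // clear flag on device
        stepKernel<<<1, 32>>>(d_counter, d_stop);
        CHECK(cudaGetLastError());
        CHECK(cudaMemcpy(&h_stop, d_stop, sizeof(bool), cudaMemcpyDeviceToHost));  // also synchronizes with the kernel
        ++rounds;
    } while (h_stop);
    std::printf("converged after %d rounds\n", rounds);
    CHECK(cudaFree(d_counter));
    CHECK(cudaFree(d_stop));
    return 0;
}

Because the device-to-host copy on the default stream is ordered after the preceding kernels and blocks the host until it completes, it already acts as the synchronization point, so the explicit cudaDeviceSynchronize calls in bfsCuda are not strictly required there.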
3b75c04ef7fd137a983949a2caa48d5d662ae0bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ // Torch port: // IMAGINE, Sergey Zagoruyko, Francisco Massa, 2015 #include "THH.h" #include <algorithm> #include <cfloat> #include "assert.h" #include "common.h" using std::max; using std::min; template <typename Dtype> __global__ void ROIWarpForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const Dtype* bottom_delta_rois, Dtype* top_data, Dtype* top_data_buffer) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); //int roi_start_w = round((bottom_rois[1] - 1) * spatial_scale); //int roi_start_h = round((bottom_rois[2] - 1)* spatial_scale); //int roi_end_w = round((bottom_rois[3] - 1) * spatial_scale); //int roi_end_h = round((bottom_rois[4] - 1) * spatial_scale); Dtype src_w = bottom_rois[3] - bottom_rois[1] + 1; Dtype src_h = bottom_rois[4] - bottom_rois[2] + 1; Dtype src_ctr_x = bottom_rois[1] + 0.5*(src_w-1.0); Dtype src_ctr_y = bottom_rois[2] + 0.5*(src_h-1.0); Dtype dst_ctr_x = bottom_delta_rois[1]; // dx (in fast-rcnn notation) = cx (in here) Dtype dst_ctr_y = bottom_delta_rois[2]; // dy (in fast-rcnn notation) = cy (in here) Dtype dst_scl_x = bottom_delta_rois[3]; // dw (in fast-rcnn notation) = sx (in here) Dtype dst_scl_y = bottom_delta_rois[4]; // dh (in fast-rcnn notation) = sy (in here) Dtype pred_ctr_x = dst_ctr_x * src_w + src_ctr_x; Dtype pred_ctr_y = dst_ctr_y * src_h + src_ctr_y; Dtype pred_w = exp(dst_scl_x) * src_w; Dtype pred_h = exp(dst_scl_y) * src_h; Dtype roi_start_w = ( (pred_ctr_x - 0.5*(pred_w-1)) - 1 ) * spatial_scale; Dtype roi_start_h = ( (pred_ctr_y - 0.5*(pred_h-1)) - 1 ) * spatial_scale; Dtype roi_end_w = ( (pred_ctr_x + 0.5*(pred_w-1)) - 1 ) * spatial_scale; Dtype roi_end_h = ( (pred_ctr_y + 0.5*(pred_h-1)) - 1 ) * spatial_scale; assert(roi_end_w - roi_start_w >= 0); assert(roi_end_h - roi_start_h >= 0); // Force malformed ROIs to be 1x1 Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype wstart_ = static_cast<Dtype>(pw) * bin_size_w + roi_start_w; Dtype hstart_ = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; Dtype wend_ = static_cast<Dtype>(pw+1) * bin_size_w + roi_start_w; Dtype hend_ = static_cast<Dtype>(ph+1) * bin_size_h + roi_start_h; int wstart = static_cast<int>(floor(wstart_)); int hstart = static_cast<int>(floor(hstart_)); int wend = static_cast<int>( ceil(wend_)); int hend = static_cast<int>( ceil(hend_)); Dtype wctr = (wend_ + wstart_) * 0.5; // dwctr / dwe = 0.5; dwctr / dws = 0.5 Dtype hctr = (hend_ + hstart_) * 0.5; // dhctr / dhe = 0.5; dhctr / dhs = 0.5 Dtype wdiff = (wend_ - wstart_) + 1; // dwdiff / dwe = 1; dwdiff / dws = -1 Dtype hdiff 
= (hend_ - hstart_) + 1; // dhdiff / dhe = 1; dhdiff / dhs = -1 //top_data[index] = static_cast<Dtype>(hend-1-hstart)+1; //top_data[index] = hend; //wend; //top_data[index] = hstart+1; // wstart+1; //top_data[index] = wdiff; //top_data[index] = hctr+1; //top_data[index] = wctr+1; // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); //top_data[index] = hstart+1; //top_data[index] = wstart+1; // Auxilliary variables used in backprop Dtype w_mask = 0, h_mask = 0; Dtype dgx_final_dwctr_all = 0; Dtype dgx_final_dwdiff_all = 0; Dtype dgy_final_dhctr_all = 0; Dtype dgy_final_dhdiff_all = 0; // Define an empty pooling region to be zero Dtype val = 0; Dtype gain = 0, gain_x = 0, gain_y = 0, gain_x_all = 0, gain_y_all = 0; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { Dtype h_ = h; for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; Dtype w_ = w; //gain_x = max(0., 1 - abs( dst_ctr_x + static_cast<Dtype>(pw) / static_cast<Dtype>(pooled_width) * dst_scl_x - w )); -- in paper, but makes no sense //gain_y = max(0., 1 - abs( dst_ctr_y + static_cast<Dtype>(ph) / static_cast<Dtype>(pooled_height) * dst_scl_y - h)); gain_x = wdiff - abs((w_ - wctr)); gain_y = hdiff - abs((h_ - hctr)); gain = gain_x * gain_y; val = val + gain * bottom_data[bottom_index]; //val = val + gain; //val = val + 1; if (h == hstart) { gain_x_all = gain_x_all + gain_x; // Update information used in backprop w_mask = w_ >= wctr ? 1 : -1; dgx_final_dwctr_all = dgx_final_dwctr_all + w_mask; dgx_final_dwdiff_all = dgx_final_dwdiff_all + 1; } } gain_y_all = gain_y_all + gain_y; h_mask = h >= hctr ? 
1 : -1; dgy_final_dhctr_all = dgy_final_dhctr_all + h_mask; dgy_final_dhdiff_all = dgy_final_dhdiff_all + 1; } if (gain_x_all > 1e-10) val = val / gain_x_all; if (gain_y_all > 1e-10) val = val / gain_y_all; top_data[index] = val; //top_data[index] = gain_x_all; //top_data[index] = gain_y_all; int buffer_index = n * (channels * pooled_height * pooled_width * 10) + c * (pooled_height * pooled_width * 10) + ph * (pooled_width * 10) + pw * 10; top_data_buffer[buffer_index+0] = wctr; top_data_buffer[buffer_index+1] = wdiff; top_data_buffer[buffer_index+2] = hctr; top_data_buffer[buffer_index+3] = hdiff; top_data_buffer[buffer_index+4] = gain_x_all; top_data_buffer[buffer_index+5] = gain_y_all; top_data_buffer[buffer_index+6] = dgx_final_dwctr_all; top_data_buffer[buffer_index+7] = dgy_final_dhctr_all; top_data_buffer[buffer_index+8] = dgx_final_dwdiff_all; top_data_buffer[buffer_index+9] = dgy_final_dhdiff_all; } } extern "C" void inn_ROIWarping_updateOutput(THCState *state, THCudaTensor *output, THCudaTensor *output_buffer, THCudaTensor *data, THCudaTensor* rois, THCudaTensor* delta_rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, delta_rois) == 2 && delta_rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, rois) == THCudaTensor_nDimension(state, delta_rois) && rois->size[0] == delta_rois->size[0] && rois->size[1] == delta_rois->size[1]); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); THAssert(THCudaTensor_isContiguous(state, delta_rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1]; THCudaTensor_resize4d(state, output, num_rois, nInputPlane, H, W); THCudaTensor_resize5d(state, output_buffer, num_rois, nInputPlane, H, W, 10); THCudaTensor_zero(state, output_buffer); long count = THCudaTensor_nElement(state, output); hipLaunchKernelGGL(( ROIWarpForward<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), count, THCudaTensor_data(state, data), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, rois), THCudaTensor_data(state, delta_rois), THCudaTensor_data(state, output), THCudaTensor_data(state, output_buffer) ); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in inn_ROIWarping_updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void ROIWarpBackwardData(const int nthreads, const Dtype* top_data_buffer, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int nth_roi, const Dtype* bottom_rois, const Dtype* bottom_delta_rois, const Dtype* top_diff, Dtype* bottom_diff_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) is an element in the input int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; bottom_rois += nth_roi * 5; int roi_batch_ind = (bottom_rois[0] - 1); if (roi_batch_ind == n) { Dtype src_w = bottom_rois[3] - bottom_rois[1] + 1; Dtype src_h = bottom_rois[4] - bottom_rois[2] + 1; Dtype src_ctr_x = bottom_rois[1] + 0.5*(src_w-1.0); Dtype src_ctr_y = bottom_rois[2] + 0.5*(src_h-1.0); Dtype dst_ctr_x = bottom_delta_rois[1]; // dx (in fast-rcnn notation) = cx (in here) Dtype dst_ctr_y = 
bottom_delta_rois[2]; // dy (in fast-rcnn notation) = cy (in here) Dtype dst_scl_x = bottom_delta_rois[3]; // dw (in fast-rcnn notation) = sx (in here) Dtype dst_scl_y = bottom_delta_rois[4]; // dh (in fast-rcnn notation) = sy (in here) Dtype pred_ctr_x = dst_ctr_x * src_w + src_ctr_x; // dpcx / dcx = src_w Dtype pred_ctr_y = dst_ctr_y * src_h + src_ctr_y; // dpcy / dcy = src_h Dtype pred_w = exp(dst_scl_x) * src_w; // dpw / dsx = src_w * exp(dsx) Dtype pred_h = exp(dst_scl_y) * src_h; // dph / dsy = src_h * exp(dsy) Dtype roi_start_w = ( (pred_ctr_x - 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drsw / dpcx = spatial_scale; drsw / dpw = -0.5 * spatial_scale Dtype roi_start_h = ( (pred_ctr_y - 0.5*(pred_h-1)) - 1 ) * spatial_scale; // drsh / dpcy = spatial_scale; drsh / dph = -0.5 * spatial_scale Dtype roi_end_w = ( (pred_ctr_x + 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drew / dpcx = spatial_scale; drew / dpw = 0.5 * spatial_scale Dtype roi_end_h = ( (pred_ctr_y + 0.5*(pred_h-1)) - 1 ) * spatial_scale; // dreh / dpcy = spatial_scale; dreh / dph = 0.5 * spatial_scale assert(roi_end_w - roi_start_w >= 0); assert(roi_end_h - roi_start_h >= 0); Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_pw = static_cast<Dtype>(pooled_width) / roi_width; Dtype bin_size_ph = static_cast<Dtype>(pooled_height) / roi_height; int pwstart = static_cast<int>(floor(static_cast<Dtype>(-roi_start_w + w) * bin_size_pw)); int phstart = static_cast<int>(floor(static_cast<Dtype>(-roi_start_h + h) * bin_size_ph)); int pwend = static_cast<int>(ceil(static_cast<Dtype>(-roi_start_w + w+1) * bin_size_pw)); int phend = static_cast<int>(ceil(static_cast<Dtype>(-roi_start_h + h+1) * bin_size_ph)); //bottom_diff_data[index] = pwend; //phend; //bottom_diff_data[index] = pwstart+1; //phend; // Clip to top boundaries phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); Dtype w_ = w, h_ = h; Dtype wctr = 0, wdiff = 0, hctr = 0, hdiff = 0; Dtype gain = 0, gain_x = 0, gain_y = 0, gain_x_all = 0, gain_y_all = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_index = nth_roi * (channels * pooled_height * pooled_width) + c * (pooled_height * pooled_width) + ph * pooled_width + pw; int top_buffer_index = nth_roi * (channels * pooled_height * pooled_width * 10) + c * (pooled_height * pooled_width * 10) + ph * (pooled_width * 10) + pw * 10; wctr = top_data_buffer[top_buffer_index+0]; wdiff = top_data_buffer[top_buffer_index+1]; hctr = top_data_buffer[top_buffer_index+2]; hdiff = top_data_buffer[top_buffer_index+3]; gain_x_all = top_data_buffer[top_buffer_index+4]; gain_y_all = top_data_buffer[top_buffer_index+5]; gain_x = wdiff - abs((w_ - wctr)); // dgx / dwdiff = 1 // dgx / dwctr = 1 ( if w >= wctr ) // dgx / dwctr = - 1 ( else ) gain_y = hdiff - abs((h_ - hctr)); // dgy / dhdiff = 1 // dgy / dhctr = 1 ( if h >= hctr ) // dgy / dhctr = - 1 ( else ) if (gain_x_all > 1e-10) gain_x = gain_x / gain_x_all; if (gain_y_all > 1e-10) gain_y = gain_y / gain_y_all; gain = gain_x * gain_y; bottom_diff_data[index] = bottom_diff_data[index] + gain * top_diff[top_index]; //val = val + gain * bottom_data[bottom_index]; } } } } } template <typename Dtype> __global__ void ROIWarpBackwardDeltaROI(const int nthreads, const Dtype* top_data_buffer, const Dtype spatial_scale, const int channels, const int height, const int width, 
const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const Dtype* bottom_delta_rois, const Dtype* top_diff, const Dtype* bottom_data, Dtype* bottom_diff_delta_rois_buffer) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int buffer_index = n * (channels * pooled_height * pooled_width * 10) + c * (pooled_height * pooled_width * 10) + ph * (pooled_width * 10) + pw * 10; Dtype wctr = top_data_buffer[buffer_index+0]; Dtype wdiff = top_data_buffer[buffer_index+1]; Dtype hctr = top_data_buffer[buffer_index+2]; Dtype hdiff = top_data_buffer[buffer_index+3]; Dtype gain_x_all = top_data_buffer[buffer_index+4]; Dtype gain_y_all = top_data_buffer[buffer_index+5]; Dtype dgx_final_dwctr_all = top_data_buffer[buffer_index+6]; Dtype dgy_final_dhctr_all = top_data_buffer[buffer_index+7]; Dtype dgx_final_dwdiff_all = top_data_buffer[buffer_index+8]; Dtype dgy_final_dhdiff_all = top_data_buffer[buffer_index+9]; //if (gain_x_all > 1e-10 && gain_y_all > 1e-10) { bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); Dtype src_w = bottom_rois[3] - bottom_rois[1] + 1; Dtype src_h = bottom_rois[4] - bottom_rois[2] + 1; Dtype src_ctr_x = bottom_rois[1] + 0.5*(src_w-1.0); Dtype src_ctr_y = bottom_rois[2] + 0.5*(src_h-1.0); Dtype dst_ctr_x = bottom_delta_rois[1]; // dx (in fast-rcnn notation) = cx (in here) Dtype dst_ctr_y = bottom_delta_rois[2]; // dy (in fast-rcnn notation) = cy (in here) Dtype dst_scl_x = bottom_delta_rois[3]; // dw (in fast-rcnn notation) = sx (in here) Dtype dst_scl_y = bottom_delta_rois[4]; // dh (in fast-rcnn notation) = sy (in here) Dtype pred_ctr_x = dst_ctr_x * src_w + src_ctr_x; // dpcx / dcx = src_w Dtype pred_ctr_y = dst_ctr_y * src_h + src_ctr_y; // dpcy / dcy = src_h Dtype pred_w = exp(dst_scl_x) * src_w; // dpw / dsx = src_w * exp(dsx) Dtype pred_h = exp(dst_scl_y) * src_h; // dph / dsy = src_h * exp(dsy) Dtype roi_start_w = ( (pred_ctr_x - 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drsw / dpcx = spatial_scale // drsw / dpw = -0.5 * spatial_scale Dtype roi_start_h = ( (pred_ctr_y - 0.5*(pred_h-1)) - 1 ) * spatial_scale; // drsh / dpcy = spatial_scale // drsh / dph = -0.5 * spatial_scale Dtype roi_end_w = ( (pred_ctr_x + 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drew / dpcx = spatial_scale // drew / dpw = 0.5 * spatial_scale Dtype roi_end_h = ( (pred_ctr_y + 0.5*(pred_h-1)) - 1 ) * spatial_scale; // dreh / dpcy = spatial_scale // dreh / dph = 0.5 * spatial_scale assert(roi_end_w - roi_start_w >= 0); assert(roi_end_h - roi_start_h >= 0); // drsw / dcx = drsw / dpcx * dpcx / dcx = spatial_scale * src_w // drew / dcx = drew / dpcx * dpcx / dcx = spatial_scale * src_w // drsh / dcy = drsh / dpcy * dpcy / dcy = spatial_scale * src_h // dreh / dcy = dreh / dpcy * dpcy / dcy = spatial_scale * src_h // drsw / dsx = drsw / dpw * dpw / dsx = -0.5 * spatial_scale * src_w * exp(dsx) // drew / dsx = drew / dpw * dpw / dsx = 0.5 * spatial_scale * src_w * exp(dsx) // drsh / dsy = drsh / dph * dph / dsy = -0.5 * spatial_scale * src_h * exp(dsy) // dreh / dsy = dreh / dph * dph / dsy = 0.5 * spatial_scale * src_h * exp(dsy) // Force malformed ROIs to be 1x1 Dtype roi_width = roi_end_w - roi_start_w + 1; // drw / drew = 1 // drw / drsw = -1 Dtype roi_height = roi_end_h - roi_start_h + 1; // drh / dreh = 1 // drh / drsh = -1 // drw / dcx = 
drw / drew * drew / dcx + drw / drsw * drsw / dcx = drew / dcx - drsw / dcx // = spatial_scale * src_w - spatial_scale * src_w = 0 // drh / dcy = drh / dreh * dreh / dcy + drh / drsh * drsh / dcy = dreh / dcy - drsh / dcy = spatial_scale * src_h - spatial_scale * src_h = 0 // drw / dsx = drw / drew * drew / dsx + drw / drsw * drsw / dsx = drew / dsx - drsw / dsx = 0.5 * spatial_scale * src_w * exp(dsx) - (-0.5 * spatial_scale * src_w * exp(dsx)) = spatial_scale * src_w * exp(dsx) // drh / dsy = drh / dreh * dreh / dsy + drh / drsh * drsh / dsy = dreh / dsy - drsh / dsy = 0.5 * spatial_scale * src_h * exp(dsy) - (-0.5 * spatial_scale * src_h * exp(dsy)) = spatial_scale * src_h * exp(dsy) Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); // dbw / drw = 1 / pooled_width Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); // dbh / drh = 1 / pooled_height // dbw / dcx = dbw / drw * drw / dcx = 0 // dbh / dcy = dbh / drh * drh / dcy = 0 // dbw / dsx = dbw / drw * drw / dsx = 1 / pooled_width * spatial_scale * src_w * exp(dsx) // dbh / dsy = dbh / drh * drh / dsy = 1 / pooled_height * spatial_scale * src_h * exp(dsy) Dtype wstart_ = static_cast<Dtype>(pw) * bin_size_w + roi_start_w; // ws = f(rsw, rew) Dtype hstart_ = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; // hw = f(rsh, reh) Dtype wend_ = static_cast<Dtype>(pw+1) * bin_size_w + roi_start_w; // we = f(rsw, rew) Dtype hend_ = static_cast<Dtype>(ph+1) * bin_size_h + roi_start_h; // he = f(rsh, reh) // dws / dbw = pw // dhs / dbh = ph // dwe / dbw = (pw+1) // dhe / dbh = (ph+1) int wstart = static_cast<int>(floor(wstart_)); int hstart = static_cast<int>(floor(hstart_)); int wend = static_cast<int>( ceil(wend_)); int hend = static_cast<int>( ceil(hend_)); // dws / dcx = dws / drsw * drsw / dcx + dws / drew * drew / dcx // = (dws / dbw * dbw / drsw + 1) * drsw / dcx + (dws / dbw * dbw / drew) * drew / dcx // = (pw * 1 / pooled_width * (-1) + 1) * spatial_scale * src_w // + (pw * 1 / pooled_width * ( 1) ) * spatial_scale * src_w // = spatial_scale * src_w // dwe / dcx = dwe / drsw * drsw / dcx + dwe / drew * drew / dcx // = (dwe / dbw * dbw / drsw + 1) * drsw / dcx + (dwe / dbw * dbw / drew) * drew / dcx // = ((pw+1) * 1 / pooled_width * (-1) + 1) * spatial_scale * src_w // + ((pw+1) * 1 / pooled_width * ( 1) ) * spatial_scale * src_w // = spatial_scale * src_w // dws / dsx = dws / drsw * drsw / dsx + dws / drew * drew / dsx // = (dws / dbw * dbw / drsw + 1) * drsw / dsx + (dws / dbw * dbw / drew) * drew / dsx // = (pw * 1 / pooled_width * (-1) + 1) * (-0.5 * spatial_scale * src_w * exp(dsx)) // + (pw * 1 / pooled_width * ( 1) ) * ( 0.5 * spatial_scale * src_w * exp(dsx)) // = (pw * 1 / pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // dwe / dsx = dwe / drsw * drsw / dsx + dwe / drew * drew / dsx // = (dwe / dbw * dbw / drsw + 1) * drsw / dsx + (dwe / dbw * dbw / drew) * drew / dsx // = ((pw+1) * 1 / pooled_width * (-1) + 1) * (-0.5 * spatial_scale * src_w * exp(dsx)) // + ((pw+1) * 1 / pooled_width * ( 1) ) * ( 0.5 * spatial_scale * src_w * exp(dsx)) // = ((pw+1) * 1 / pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // dws / dcy = spatial_scale * src_h // dwe / dcy = spatial_scale * src_h // dws / dsy = ( ph * 1 / pooled_height - 1) * spatial_scale * src_h * exp(dsy) // dwe / dsy = ((ph+1) * 1 / pooled_height - 1) * spatial_scale * src_h * exp(dsy) /* Dtype wctr = (wend_ + wstart_) * 0.5; // dwctr / dwe = 0.5; dwctr / dws = 0.5 Dtype hctr = (hend_ + hstart_) * 0.5; // dhctr / dhe = 0.5; dhctr / 
dhs = 0.5 Dtype wdiff = (wend_ - wstart_) + 1; // dwdiff / dwe = 1; dwdiff / dws = -1 Dtype hdiff = (hend_ - hstart_) + 1; // dhdiff / dhe = 1; dhdiff / dhs = -1 // dwctr / dcx = dwctr / dwe * dwe / dcx + dwctr / dws * dws / dcx = 0.5 * spatial_scale * src_w + 0.5 * spatial_scale * src_w = spatial_scale * src_w // dwdiff / dcx = dwdiff / dwe * dwe / dcx + dwdiff / dws * dws / dcx = 1 * spatial_scale * src_w - 1 * spatial_scale * src_w = 0 // dhctr / dcy = spatial_scale * src_h // dhdiff / dcy = 0 // dwctr / dsx = dwctr / dwe * dwe / dsx + dwctr / dws * dws / dsx // = 0.5 * ((pw+1)/pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // + 0.5 * ( pw /pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // = 0.5 * ((2*pw+1)/pooled_width - 1) * spatial_scale * src_w * exp(dsx) // = ((pw + 0.5) / pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // dwdiff / dsx = dwdiff / dwe * dwe / dsx + dwdiff / dws * dws / dsx // = 1 * ((pw+1)/pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // + (-1) * ( pw /pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // = (1 / pooled_width) * spatial_scale * src_w * exp(dsx) // dhctr / dsy = ((ph + 0.5) / pooled_height - 0.5) * spatial_scale * src_h * exp(dsy) // dhdiff / dsy = (1 / pooled_height) * spatial_scale * src_h * exp(dsy) // dgx / dwctr = (w >= wctr ? 1 : -1) // dgx / dwdiff = 1 // dgy / dhctr = (h >= hctr ? 1 : -1) // dgy / dhdiff = 1 // gx_final = gx / gx_all // dgx_final / dwctr = ( dgx/dwctr * gx_all - gx * dgx_all/dwctr ) / (gx_all)^2 = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 // dgx_final / dwdiff = ( dgx/dwdiff * gx_all - gx * dgx_all/dwdiff ) / (gx_all)^2 = ( 1 * gx_all - gx * sum_for_w{ 1 } ) / gx_all^2 // gy_final = gy / gy_all // dgy_final / dhctr = ... // dgy_final / dhdiff = ... // dgx_final / dcx = dgx_final / dwctr * dwctr / dcx + dgx_final / dwdiff * dwdiff / dcx // = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 * spatial_scale * src_w + (...) * 0 // = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 * spatial_scale * src_w // dgy_final / dcy = ( (h >= hctr ? 1 : -1) * gy_all - gy * sum_for_h{ (h >= hctr ? 1 : -1) } ) / gx_all^2 * spatial_scale * src_h // dgx_final / dsx = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 * ((pw + 0.5) - 0.5 * pooled_width) / pooled_width * spatial_scale * src_w * exp(dsx) + // ( 1 * gx_all - gx * sum_for_w{ 1 } ) / gx_all^2 * 1 / pooled_width * spatial_scale * src_w * exp(dsx) // dgy_final / dsy = ( (h >= hctr ? 1 : -1) * gy_all - gy * sum_for_h{ (h >= hctr ? 1 : -1) } ) / gy_all^2 * ((ph + 0.5) - 0.5 * pooled_height) / pooled_height * spatial_scale * src_h * exp(dsy) + // ( 1 * gy_all - gy * sum_for_h{ 1 } ) / gy_all^2 * 1 / pooled_height * spatial_scale * src_h * exp(dsy) // dg / dcx = dg / dgx_final * dgx_final / dcx + dg / dgy_final * dgy_final / dcx // = gy_final * dgx_final / dcx + gx_final * 0 // = gy_final * dgx_final / dcx // ... 
*/ // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); // Define an empty pooling region to be zero Dtype val_cx = 0, val_cy = 0, val_sx = 0, val_sy = 0; Dtype gain_x = 0, gain_y = 0; Dtype pw_ = static_cast<Dtype>(pw); Dtype ph_ = static_cast<Dtype>(ph); Dtype pooled_width_ = static_cast<Dtype>(pooled_width); Dtype pooled_height_ = static_cast<Dtype>(pooled_height); bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype w_mask = 0, h_mask = 0, coeff_x = 0, coeff_y = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; Dtype w_ = w, h_ = h; gain_x = wdiff - abs((w_ - wctr)); gain_y = hdiff - abs((h_ - hctr)); w_mask = w_ >= wctr ? 1 : -1; h_mask = h_ >= hctr ? 1 : -1; //val_cx = val_cx + gain_y / gain_y_all * (w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) / (gain_x_all*gain_x_all) * spatial_scale * src_w * top_diff[index]; //val_cy = val_cy + gain_x / gain_x_all * (h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) / (gain_y_all*gain_y_all) * spatial_scale * src_h * top_diff[index]; //val_sx = val_sx + gain_y / gain_y_all *(( gain_x_all - gain_x * dgx_final_dwdiff_all) / (gain_x_all*gain_x_all) * (pw_+0.5-0.5*pooled_width) / pooled_width * spatial_scale * src_w * exp(dsx) + // (w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) / (gain_x_all*gain_x_all) * 1 / pooled_width * spatial_scale * src_w * exp(dsx) ) * top_diff[index]; //val_sy = val_sy + gain_x / gain_x_all *(( gain_y_all - gain_y * dgy_final_dhdiff_all) / (gain_y_all*gain_y_all) * (ph_+0.5-0.5*pooled_height)/ pooled_height * spatial_scale * src_h * exp(dsy) + // (h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) / (gain_y_all*gain_y_all) * 1 / pooled_height * spatial_scale * src_h * exp(dsy) ) * top_diff[index]; //if (gain_x > 1e-10 && gain_y > 1e-10) { coeff_x = bottom_data[bottom_index] * gain_y * spatial_scale * src_w * top_diff[index]; if (gain_x_all > 1e-10) {coeff_x = coeff_x / (gain_x_all*gain_x_all);} if (gain_y_all > 1e-10) {coeff_x = coeff_x / gain_y_all;} val_cx = val_cx + (w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) * coeff_x; val_sx = val_sx + ((w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) * (pw_+0.5-0.5*pooled_width_) + ( gain_x_all - gain_x * dgx_final_dwdiff_all)) / pooled_width_ * coeff_x * exp(dst_scl_x); coeff_y = bottom_data[bottom_index] * gain_x * spatial_scale * src_h * top_diff[index]; if (gain_y_all > 1e-10) {coeff_y = coeff_y / (gain_y_all*gain_y_all);} if (gain_x_all > 1e-10) {coeff_y = coeff_y / gain_x_all;} val_cy = val_cy + (h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) * coeff_y; val_sy = val_sy + ((h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) * (ph_+0.5-0.5*pooled_height_) + ( gain_y_all - gain_y * dgy_final_dhdiff_all)) / pooled_height_ * coeff_y * exp(dst_scl_y); //} } } /*int*/ buffer_index = n * (channels * pooled_height * pooled_width * 4) + c * (pooled_height * pooled_width * 4) + ph * (pooled_width * 4) + pw * 4; bottom_diff_delta_rois_buffer[buffer_index+0] = val_cx; bottom_diff_delta_rois_buffer[buffer_index+1] = val_cy; bottom_diff_delta_rois_buffer[buffer_index+2] = val_sx; bottom_diff_delta_rois_buffer[buffer_index+3] = val_sy; //} } } extern "C" void inn_ROIWarping_updateGradInputAtomic(THCState *state, THCudaTensor *gradInput_data, THCudaTensor *data, THCudaTensor *gradInput_delta_rois, THCudaTensor *delta_rois, THCudaTensor
*gradInput_delta_rois_buffer, THCudaTensor *gradOutput, THCudaTensor *top_data_buffer, THCudaTensor* rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, top_data_buffer) == 5); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, delta_rois) == 2 && delta_rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, rois) == THCudaTensor_nDimension(state, delta_rois) && rois->size[0] == delta_rois->size[0] && rois->size[1] == delta_rois->size[1]); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, top_data_buffer)); THAssert(THCudaTensor_isContiguous(state, rois)); THAssert(THCudaTensor_isContiguous(state, delta_rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1]; THCudaTensor_resizeAs(state, gradInput_data, data); THCudaTensor_zero(state, gradInput_data); THCudaTensor_resizeAs(state, gradInput_delta_rois, delta_rois); THCudaTensor_zero(state, gradInput_delta_rois); THCudaTensor_resize5d(state, gradInput_delta_rois_buffer, num_rois, nInputPlane, H, W, 4); THCudaTensor_zero(state, gradInput_delta_rois_buffer); //Backpropagation for data long count = THCudaTensor_nElement(state, gradInput_data); for (int nth_roi = 0; nth_roi < num_rois; ++nth_roi) { hipLaunchKernelGGL(( ROIWarpBackwardData<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS / 2), 0, THCState_getCurrentStream(state), count, THCudaTensor_data(state, top_data_buffer), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, nth_roi, THCudaTensor_data(state, rois), THCudaTensor_data(state, delta_rois), THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, gradInput_data) ); } //Backpropagation for delta_roi count = THCudaTensor_nElement(state, gradOutput); hipLaunchKernelGGL(( ROIWarpBackwardDeltaROI<float>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS / 2), 0, THCState_getCurrentStream(state), count, THCudaTensor_data(state, top_data_buffer), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, rois), THCudaTensor_data(state, delta_rois), THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, data), THCudaTensor_data(state, gradInput_delta_rois_buffer) ); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in inn_ROIWarping_updateGradInputAtomic: %s\n", hipGetErrorString(err)); THError("aborting"); } }
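The chain-rule comments inside ROIWarpBackwardDeltaROI can be sanity-checked numerically. The host-only sketch below (illustrative; the ROI values and helper names are hypothetical, not taken from either file) decodes a box exactly the way the kernels do, then compares two of the quoted derivatives, d(roi_start_w)/d(cx) = spatial_scale*src_w and d(roi_start_w)/d(sx) = -0.5*spatial_scale*src_w*exp(sx), against central finite differences.

// Illustrative sketch only; not part of the generated files.
#include <cmath>
#include <cstdio>

struct RoiEdges { double start_w, end_w, start_h, end_h; };

// Decode (cx, cy, sx, sy) relative to a source ROI, mirroring ROIWarpForward.
static RoiEdges decode(const double roi[4], double cx, double cy, double sx, double sy,
                       double spatial_scale) {
  double src_w = roi[2] - roi[0] + 1, src_h = roi[3] - roi[1] + 1;
  double src_ctr_x = roi[0] + 0.5 * (src_w - 1.0), src_ctr_y = roi[1] + 0.5 * (src_h - 1.0);
  double pred_ctr_x = cx * src_w + src_ctr_x, pred_ctr_y = cy * src_h + src_ctr_y;
  double pred_w = std::exp(sx) * src_w, pred_h = std::exp(sy) * src_h;
  RoiEdges e;
  e.start_w = ((pred_ctr_x - 0.5 * (pred_w - 1)) - 1) * spatial_scale;
  e.end_w   = ((pred_ctr_x + 0.5 * (pred_w - 1)) - 1) * spatial_scale;
  e.start_h = ((pred_ctr_y - 0.5 * (pred_h - 1)) - 1) * spatial_scale;
  e.end_h   = ((pred_ctr_y + 0.5 * (pred_h - 1)) - 1) * spatial_scale;
  return e;
}

int main() {
  const double roi[4] = {17, 9, 80, 57};  // x1, y1, x2, y2 (hypothetical values)
  const double cx = 0.05, cy = -0.02, sx = 0.1, sy = -0.08, scale = 0.0625, eps = 1e-6;
  double src_w = roi[2] - roi[0] + 1;
  // Analytic derivatives as written in the kernel comments.
  double d_start_w_d_cx = scale * src_w;
  double d_start_w_d_sx = -0.5 * scale * src_w * std::exp(sx);
  // Finite-difference counterparts.
  double fd_cx = (decode(roi, cx + eps, cy, sx, sy, scale).start_w -
                  decode(roi, cx - eps, cy, sx, sy, scale).start_w) / (2 * eps);
  double fd_sx = (decode(roi, cx, cy, sx + eps, sy, scale).start_w -
                  decode(roi, cx, cy, sx - eps, sy, scale).start_w) / (2 * eps);
  std::printf("d(start_w)/d(cx): analytic %.6f  fd %.6f\n", d_start_w_d_cx, fd_cx);
  std::printf("d(start_w)/d(sx): analytic %.6f  fd %.6f\n", d_start_w_d_sx, fd_sx);
  return 0;
}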
3b75c04ef7fd137a983949a2caa48d5d662ae0bd.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ // Torch port: // IMAGINE, Sergey Zagoruyko, Francisco Massa, 2015 #include "THC.h" #include <algorithm> #include <cfloat> #include "assert.h" #include "common.h" using std::max; using std::min; template <typename Dtype> __global__ void ROIWarpForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const Dtype* bottom_delta_rois, Dtype* top_data, Dtype* top_data_buffer) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); //int roi_start_w = round((bottom_rois[1] - 1) * spatial_scale); //int roi_start_h = round((bottom_rois[2] - 1)* spatial_scale); //int roi_end_w = round((bottom_rois[3] - 1) * spatial_scale); //int roi_end_h = round((bottom_rois[4] - 1) * spatial_scale); Dtype src_w = bottom_rois[3] - bottom_rois[1] + 1; Dtype src_h = bottom_rois[4] - bottom_rois[2] + 1; Dtype src_ctr_x = bottom_rois[1] + 0.5*(src_w-1.0); Dtype src_ctr_y = bottom_rois[2] + 0.5*(src_h-1.0); Dtype dst_ctr_x = bottom_delta_rois[1]; // dx (in fast-rcnn notation) = cx (in here) Dtype dst_ctr_y = bottom_delta_rois[2]; // dy (in fast-rcnn notation) = cy (in here) Dtype dst_scl_x = bottom_delta_rois[3]; // dw (in fast-rcnn notation) = sx (in here) Dtype dst_scl_y = bottom_delta_rois[4]; // dh (in fast-rcnn notation) = sy (in here) Dtype pred_ctr_x = dst_ctr_x * src_w + src_ctr_x; Dtype pred_ctr_y = dst_ctr_y * src_h + src_ctr_y; Dtype pred_w = exp(dst_scl_x) * src_w; Dtype pred_h = exp(dst_scl_y) * src_h; Dtype roi_start_w = ( (pred_ctr_x - 0.5*(pred_w-1)) - 1 ) * spatial_scale; Dtype roi_start_h = ( (pred_ctr_y - 0.5*(pred_h-1)) - 1 ) * spatial_scale; Dtype roi_end_w = ( (pred_ctr_x + 0.5*(pred_w-1)) - 1 ) * spatial_scale; Dtype roi_end_h = ( (pred_ctr_y + 0.5*(pred_h-1)) - 1 ) * spatial_scale; assert(roi_end_w - roi_start_w >= 0); assert(roi_end_h - roi_start_h >= 0); // Force malformed ROIs to be 1x1 Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype wstart_ = static_cast<Dtype>(pw) * bin_size_w + roi_start_w; Dtype hstart_ = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; Dtype wend_ = static_cast<Dtype>(pw+1) * bin_size_w + roi_start_w; Dtype hend_ = static_cast<Dtype>(ph+1) * bin_size_h + roi_start_h; int wstart = static_cast<int>(floor(wstart_)); int hstart = static_cast<int>(floor(hstart_)); int wend = static_cast<int>( ceil(wend_)); int hend = static_cast<int>( ceil(hend_)); Dtype wctr = (wend_ + wstart_) * 0.5; // dwctr / dwe = 0.5; dwctr / dws = 0.5 Dtype hctr = (hend_ + hstart_) * 0.5; // dhctr / dhe = 0.5; dhctr / dhs = 0.5 Dtype wdiff = (wend_ - wstart_) + 1; // dwdiff / dwe = 1; dwdiff / dws = -1 Dtype hdiff = (hend_ - hstart_) + 1; // dhdiff / dhe = 1; dhdiff / dhs = -1 //top_data[index] = 
static_cast<Dtype>(hend-1-hstart)+1; //top_data[index] = hend; //wend; //top_data[index] = hstart+1; // wstart+1; //top_data[index] = wdiff; //top_data[index] = hctr+1; //top_data[index] = wctr+1; // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); //top_data[index] = hstart+1; //top_data[index] = wstart+1; // Auxilliary variables used in backprop Dtype w_mask = 0, h_mask = 0; Dtype dgx_final_dwctr_all = 0; Dtype dgx_final_dwdiff_all = 0; Dtype dgy_final_dhctr_all = 0; Dtype dgy_final_dhdiff_all = 0; // Define an empty pooling region to be zero Dtype val = 0; Dtype gain = 0, gain_x = 0, gain_y = 0, gain_x_all = 0, gain_y_all = 0; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { Dtype h_ = h; for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; Dtype w_ = w; //gain_x = max(0., 1 - abs( dst_ctr_x + static_cast<Dtype>(pw) / static_cast<Dtype>(pooled_width) * dst_scl_x - w )); -- in paper, but makes no sense //gain_y = max(0., 1 - abs( dst_ctr_y + static_cast<Dtype>(ph) / static_cast<Dtype>(pooled_height) * dst_scl_y - h)); gain_x = wdiff - abs((w_ - wctr)); gain_y = hdiff - abs((h_ - hctr)); gain = gain_x * gain_y; val = val + gain * bottom_data[bottom_index]; //val = val + gain; //val = val + 1; if (h == hstart) { gain_x_all = gain_x_all + gain_x; // Update information used in backprop w_mask = w_ >= wctr ? 1 : -1; dgx_final_dwctr_all = dgx_final_dwctr_all + w_mask; dgx_final_dwdiff_all = dgx_final_dwdiff_all + 1; } } gain_y_all = gain_y_all + gain_y; h_mask = h >= hctr ? 1 : -1; dgy_final_dhctr_all = dgy_final_dhctr_all + h_mask; dgy_final_dhdiff_all = dgy_final_dhdiff_all + 1; } if (gain_x_all > 1e-10) val = val / gain_x_all; if (gain_y_all > 1e-10) val = val / gain_y_all; top_data[index] = val; //top_data[index] = gain_x_all; //top_data[index] = gain_y_all; int buffer_index = n * (channels * pooled_height * pooled_width * 10) + c * (pooled_height * pooled_width * 10) + ph * (pooled_width * 10) + pw * 10; top_data_buffer[buffer_index+0] = wctr; top_data_buffer[buffer_index+1] = wdiff; top_data_buffer[buffer_index+2] = hctr; top_data_buffer[buffer_index+3] = hdiff; top_data_buffer[buffer_index+4] = gain_x_all; top_data_buffer[buffer_index+5] = gain_y_all; top_data_buffer[buffer_index+6] = dgx_final_dwctr_all; top_data_buffer[buffer_index+7] = dgy_final_dhctr_all; top_data_buffer[buffer_index+8] = dgx_final_dwdiff_all; top_data_buffer[buffer_index+9] = dgy_final_dhdiff_all; } } extern "C" void inn_ROIWarping_updateOutput(THCState *state, THCudaTensor *output, THCudaTensor *output_buffer, THCudaTensor *data, THCudaTensor* rois, THCudaTensor* delta_rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, delta_rois) == 2 && delta_rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, rois) == THCudaTensor_nDimension(state, delta_rois) && rois->size[0] == delta_rois->size[0] && rois->size[1] == delta_rois->size[1]); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); THAssert(THCudaTensor_isContiguous(state, delta_rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1]; THCudaTensor_resize4d(state, output, num_rois, nInputPlane, H, W); THCudaTensor_resize5d(state, 
output_buffer, num_rois, nInputPlane, H, W, 10); THCudaTensor_zero(state, output_buffer); long count = THCudaTensor_nElement(state, output); ROIWarpForward<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( count, THCudaTensor_data(state, data), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, rois), THCudaTensor_data(state, delta_rois), THCudaTensor_data(state, output), THCudaTensor_data(state, output_buffer) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in inn_ROIWarping_updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void ROIWarpBackwardData(const int nthreads, const Dtype* top_data_buffer, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int nth_roi, const Dtype* bottom_rois, const Dtype* bottom_delta_rois, const Dtype* top_diff, Dtype* bottom_diff_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) is an element in the input int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; bottom_rois += nth_roi * 5; int roi_batch_ind = (bottom_rois[0] - 1); if (roi_batch_ind == n) { Dtype src_w = bottom_rois[3] - bottom_rois[1] + 1; Dtype src_h = bottom_rois[4] - bottom_rois[2] + 1; Dtype src_ctr_x = bottom_rois[1] + 0.5*(src_w-1.0); Dtype src_ctr_y = bottom_rois[2] + 0.5*(src_h-1.0); Dtype dst_ctr_x = bottom_delta_rois[1]; // dx (in fast-rcnn notation) = cx (in here) Dtype dst_ctr_y = bottom_delta_rois[2]; // dy (in fast-rcnn notation) = cy (in here) Dtype dst_scl_x = bottom_delta_rois[3]; // dw (in fast-rcnn notation) = sx (in here) Dtype dst_scl_y = bottom_delta_rois[4]; // dh (in fast-rcnn notation) = sy (in here) Dtype pred_ctr_x = dst_ctr_x * src_w + src_ctr_x; // dpcx / dcx = src_w Dtype pred_ctr_y = dst_ctr_y * src_h + src_ctr_y; // dpcy / dcy = src_h Dtype pred_w = exp(dst_scl_x) * src_w; // dpw / dsx = src_w * exp(dsx) Dtype pred_h = exp(dst_scl_y) * src_h; // dph / dsy = src_h * exp(dsy) Dtype roi_start_w = ( (pred_ctr_x - 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drsw / dpcx = spatial_scale; drsw / dpw = -0.5 * spatial_scale Dtype roi_start_h = ( (pred_ctr_y - 0.5*(pred_h-1)) - 1 ) * spatial_scale; // drsh / dpcy = spatial_scale; drsh / dph = -0.5 * spatial_scale Dtype roi_end_w = ( (pred_ctr_x + 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drew / dpcx = spatial_scale; drew / dpw = 0.5 * spatial_scale Dtype roi_end_h = ( (pred_ctr_y + 0.5*(pred_h-1)) - 1 ) * spatial_scale; // dreh / dpcy = spatial_scale; dreh / dph = 0.5 * spatial_scale assert(roi_end_w - roi_start_w >= 0); assert(roi_end_h - roi_start_h >= 0); Dtype roi_width = roi_end_w - roi_start_w + 1; Dtype roi_height = roi_end_h - roi_start_h + 1; Dtype bin_size_pw = static_cast<Dtype>(pooled_width) / roi_width; Dtype bin_size_ph = static_cast<Dtype>(pooled_height) / roi_height; int pwstart = static_cast<int>(floor(static_cast<Dtype>(-roi_start_w + w) * bin_size_pw)); int phstart = static_cast<int>(floor(static_cast<Dtype>(-roi_start_h + h) * bin_size_ph)); int pwend = static_cast<int>(ceil(static_cast<Dtype>(-roi_start_w + w+1) * bin_size_pw)); int phend = static_cast<int>(ceil(static_cast<Dtype>(-roi_start_h + h+1) * bin_size_ph)); //bottom_diff_data[index] = pwend; //phend; //bottom_diff_data[index] = pwstart+1; //phend; // Clip to top boundaries phstart = 
min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); Dtype w_ = w, h_ = h; Dtype wctr = 0, wdiff = 0, hctr = 0, hdiff = 0; Dtype gain = 0, gain_x = 0, gain_y = 0, gain_x_all = 0, gain_y_all = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_index = nth_roi * (channels * pooled_height * pooled_width) + c * (pooled_height * pooled_width) + ph * pooled_width + pw; int top_buffer_index = nth_roi * (channels * pooled_height * pooled_width * 10) + c * (pooled_height * pooled_width * 10) + ph * (pooled_width * 10) + pw * 10; wctr = top_data_buffer[top_buffer_index+0]; wdiff = top_data_buffer[top_buffer_index+1]; hctr = top_data_buffer[top_buffer_index+2]; hdiff = top_data_buffer[top_buffer_index+3]; gain_x_all = top_data_buffer[top_buffer_index+4]; gain_y_all = top_data_buffer[top_buffer_index+5]; gain_x = wdiff - abs((w_ - wctr)); // dgx / dwdiff = 1 // dgx / dwctr = 1 ( if w >= wctr ) // dgx / dwctr = - 1 ( else ) gain_y = hdiff - abs((h_ - hctr)); // dgy / dhdiff = 1 // dgy / dhctr = 1 ( if h >= hctr ) // dgy / dhctr = - 1 ( else ) if (gain_x_all > 1e-10) gain_x = gain_x / gain_x_all; if (gain_y_all > 1e-10) gain_y = gain_y / gain_y_all; gain = gain_x * gain_y; bottom_diff_data[index] = bottom_diff_data[index] + gain * top_diff[top_index]; //val = val + gain * bottom_data[bottom_index]; } } } } } template <typename Dtype> __global__ void ROIWarpBackwardDeltaROI(const int nthreads, const Dtype* top_data_buffer, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const Dtype* bottom_delta_rois, const Dtype* top_diff, const Dtype* bottom_data, Dtype* bottom_diff_delta_rois_buffer) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int buffer_index = n * (channels * pooled_height * pooled_width * 10) + c * (pooled_height * pooled_width * 10) + ph * (pooled_width * 10) + pw * 10; Dtype wctr = top_data_buffer[buffer_index+0]; Dtype wdiff = top_data_buffer[buffer_index+1]; Dtype hctr = top_data_buffer[buffer_index+2]; Dtype hdiff = top_data_buffer[buffer_index+3]; Dtype gain_x_all = top_data_buffer[buffer_index+4]; Dtype gain_y_all = top_data_buffer[buffer_index+5]; Dtype dgx_final_dwctr_all = top_data_buffer[buffer_index+6]; Dtype dgy_final_dhctr_all = top_data_buffer[buffer_index+7]; Dtype dgx_final_dwdiff_all = top_data_buffer[buffer_index+8]; Dtype dgy_final_dhdiff_all = top_data_buffer[buffer_index+9]; //if (gain_x_all > 1e-10 && gain_y_all > 1e-10) { bottom_rois += n * 5; int roi_batch_ind = (bottom_rois[0] - 1); Dtype src_w = bottom_rois[3] - bottom_rois[1] + 1; Dtype src_h = bottom_rois[4] - bottom_rois[2] + 1; Dtype src_ctr_x = bottom_rois[1] + 0.5*(src_w-1.0); Dtype src_ctr_y = bottom_rois[2] + 0.5*(src_h-1.0); Dtype dst_ctr_x = bottom_delta_rois[1]; // dx (in fast-rcnn notation) = cx (in here) Dtype dst_ctr_y = bottom_delta_rois[2]; // dy (in fast-rcnn notation) = cy (in here) Dtype dst_scl_x = bottom_delta_rois[3]; // dw (in fast-rcnn notation) = sx (in here) Dtype dst_scl_y = bottom_delta_rois[4]; // dh (in fast-rcnn notation) = sy (in here) Dtype pred_ctr_x = dst_ctr_x * src_w + src_ctr_x; // 
dpcx / dcx = src_w Dtype pred_ctr_y = dst_ctr_y * src_h + src_ctr_y; // dpcy / dcy = src_h Dtype pred_w = exp(dst_scl_x) * src_w; // dpw / dsx = src_w * exp(dsx) Dtype pred_h = exp(dst_scl_y) * src_h; // dph / dsy = src_h * exp(dsy) Dtype roi_start_w = ( (pred_ctr_x - 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drsw / dpcx = spatial_scale // drsw / dpw = -0.5 * spatial_scale Dtype roi_start_h = ( (pred_ctr_y - 0.5*(pred_h-1)) - 1 ) * spatial_scale; // drsh / dpcy = spatial_scale // drsh / dph = -0.5 * spatial_scale Dtype roi_end_w = ( (pred_ctr_x + 0.5*(pred_w-1)) - 1 ) * spatial_scale; // drew / dpcx = spatial_scale // drew / dpw = 0.5 * spatial_scale Dtype roi_end_h = ( (pred_ctr_y + 0.5*(pred_h-1)) - 1 ) * spatial_scale; // dreh / dpcy = spatial_scale // dreh / dph = 0.5 * spatial_scale assert(roi_end_w - roi_start_w >= 0); assert(roi_end_h - roi_start_h >= 0); // drsw / dcx = drsw / dpcx * dpcx / dcx = spatial_scale * src_w // drew / dcx = drew / dpcx * dpcx / dcx = spatial_scale * src_w // drsh / dcy = drsh / dpcy * dpcy / dcy = spatial_scale * src_h // dreh / dcy = dreh / dpcy * dpcy / dcy = spatial_scale * src_h // drsw / dsx = drsw / dpw * dpw / dsx = -0.5 * spatial_scale * src_w * exp(dsx) // drew / dsx = drew / dpw * dpw / dsx = 0.5 * spatial_scale * src_w * exp(dsx) // drsh / dsy = drsh / dph * dph / dsy = -0.5 * spatial_scale * src_h * exp(dsy) // dreh / dsy = dreh / dph * dph / dsy = 0.5 * spatial_scale * src_h * exp(dsy) // Force malformed ROIs to be 1x1 Dtype roi_width = roi_end_w - roi_start_w + 1; // drw / drew = 1 // drw / drsw = -1 Dtype roi_height = roi_end_h - roi_start_h + 1; // drh / dreh = 1 // drh / drsh = -1 // drw / dcx = drw / drew * drew / dcx + drw / drsw * drsw / dcx = drew / dcx - drsw / dcx // = spatial_scale * src_w - spatial_scale * src_w = 0 // drh / dcy = drh / dreh * dreh / dcy + drh / drsh * drsh / dcy = dreh / dcy - drsh / dcy = spatial_scale * src_h - spatial_scale * src_h = 0 // drw / dsx = drw / drew * drew / dsx + drw / drsw * drsw / dsx = drew / dsx - drsw / dsx = 0.5 * spatial_scale * src_w * exp(dsx) - (-0.5 * spatial_scale * src_w * exp(dsx)) = spatial_scale * src_w * exp(dsx) // drh / dsy = drh / dreh * dreh / dsy + drh / drsh * drsh / dsy = dreh / dsy - drsh / dsy = 0.5 * spatial_scale * src_h * exp(dsy) - (-0.5 * spatial_scale * src_h * exp(dsy)) = spatial_scale * src_h * exp(dsy) Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); // dbw / drw = 1 / pooled_width Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); // dbh / drh = 1 / pooled_height // dbw / dcx = dbw / drw * drw / dcx = 0 // dbh / dcy = dbh / drh * drh / dcy = 0 // dbw / dsx = dbw / drw * drw / dsx = 1 / pooled_width * spatial_scale * src_w * exp(dsx) // dbh / dsy = dbh / drh * drh / dsy = 1 / pooled_height * spatial_scale * src_h * exp(dsy) Dtype wstart_ = static_cast<Dtype>(pw) * bin_size_w + roi_start_w; // ws = f(rsw, rew) Dtype hstart_ = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; // hw = f(rsh, reh) Dtype wend_ = static_cast<Dtype>(pw+1) * bin_size_w + roi_start_w; // we = f(rsw, rew) Dtype hend_ = static_cast<Dtype>(ph+1) * bin_size_h + roi_start_h; // he = f(rsh, reh) // dws / dbw = pw // dhs / dbh = ph // dwe / dbw = (pw+1) // dhe / dbh = (ph+1) int wstart = static_cast<int>(floor(wstart_)); int hstart = static_cast<int>(floor(hstart_)); int wend = static_cast<int>( ceil(wend_)); int hend = static_cast<int>( ceil(hend_)); // dws / dcx = dws / drsw * drsw / dcx + dws / drew * drew / dcx // = (dws / dbw * dbw / drsw + 1) * drsw / dcx + 
(dws / dbw * dbw / drew) * drew / dcx // = (pw * 1 / pooled_width * (-1) + 1) * spatial_scale * src_w // + (pw * 1 / pooled_width * ( 1) ) * spatial_scale * src_w // = spatial_scale * src_w // dwe / dcx = dwe / drsw * drsw / dcx + dwe / drew * drew / dcx // = (dwe / dbw * dbw / drsw + 1) * drsw / dcx + (dwe / dbw * dbw / drew) * drew / dcx // = ((pw+1) * 1 / pooled_width * (-1) + 1) * spatial_scale * src_w // + ((pw+1) * 1 / pooled_width * ( 1) ) * spatial_scale * src_w // = spatial_scale * src_w // dws / dsx = dws / drsw * drsw / dsx + dws / drew * drew / dsx // = (dws / dbw * dbw / drsw + 1) * drsw / dsx + (dws / dbw * dbw / drew) * drew / dsx // = (pw * 1 / pooled_width * (-1) + 1) * (-0.5 * spatial_scale * src_w * exp(dsx)) // + (pw * 1 / pooled_width * ( 1) ) * ( 0.5 * spatial_scale * src_w * exp(dsx)) // = (pw * 1 / pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // dwe / dsx = dwe / drsw * drsw / dsx + dwe / drew * drew / dsx // = (dwe / dbw * dbw / drsw + 1) * drsw / dsx + (dwe / dbw * dbw / drew) * drew / dsx // = ((pw+1) * 1 / pooled_width * (-1) + 1) * (-0.5 * spatial_scale * src_w * exp(dsx)) // + ((pw+1) * 1 / pooled_width * ( 1) ) * ( 0.5 * spatial_scale * src_w * exp(dsx)) // = ((pw+1) * 1 / pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // dws / dcy = spatial_scale * src_h // dwe / dcy = spatial_scale * src_h // dws / dsy = ( ph * 1 / pooled_height - 1) * spatial_scale * src_h * exp(dsy) // dwe / dsy = ((ph+1) * 1 / pooled_height - 1) * spatial_scale * src_h * exp(dsy) /* Dtype wctr = (wend_ + wstart_) * 0.5; // dwctr / dwe = 0.5; dwctr / dws = 0.5 Dtype hctr = (hend_ + hstart_) * 0.5; // dhctr / dhe = 0.5; dhctr / dhs = 0.5 Dtype wdiff = (wend_ - wstart_) + 1; // dwdiff / dwe = 1; dwdiff / dws = -1 Dtype hdiff = (hend_ - hstart_) + 1; // dhdiff / dhe = 1; dhdiff / dhs = -1 // dwctr / dcx = dwctr / dwe * dwe / dcx + dwctr / dws * dws / dcx = 0.5 * spatial_scale * src_w + 0.5 * spatial_scale * src_w = spatial_scale * src_w // dwdiff / dcx = dwdiff / dwe * dwe / dcx + dwdiff / dws * dws / dcx = 1 * spatial_scale * src_w - 1 * spatial_scale * src_w = 0 // dhctr / dcy = spatial_scale * src_h // dhdiff / dcy = 0 // dwctr / dsx = dwctr / dwe * dwe / dsx + dwctr / dws * dws / dsx // = 0.5 * ((pw+1)/pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // + 0.5 * ( pw /pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // = 0.5 * ((2*pw+1)/pooled_width - 1) * spatial_scale * src_w * exp(dsx) // = ((pw + 0.5) / pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // dwdiff / dsx = dwdiff / dwe * dwe / dsx + dwdiff / dws * dws / dsx // = 1 * ((pw+1)/pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // + (-1) * ( pw /pooled_width - 0.5) * spatial_scale * src_w * exp(dsx) // = (1 / pooled_width) * spatial_scale * src_w * exp(dsx) // dhctr / dsy = ((ph + 0.5) / pooled_height - 0.5) * spatial_scale * src_h * exp(dsy) // dhdiff / dsy = (1 / pooled_height) * spatial_scale * src_h * exp(dsy) // dgx / dwctr = (w >= wctr ? 1 : -1) // dgx / dwdiff = 1 // dgy / dhctr = (h >= hctr ? 1 : -1) // dgy / dhdiff = 1 // gx_final = gx / gx_all // dgx_final / dwctr = ( dgx/dwctr * gx_all - gx * dgx_all/dwctr ) / (gx_all)^2 = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 // dgx_final / dwdiff = ( dgx/dwdiff * gx_all - gx * dgx_all/dwdiff ) / (gx_all)^2 = ( 1 * gx_all - gx * sum_for_w{ 1 } ) / gx_all^2 // gy_final = gy / gy_all // dgy_final / dhctr = ... // dgy_final / dhdiff = ... 
// dgx_final / dcx = dgx_final / dwctr * dwctr / dcx + dgx_final / dwdiff * dwdiff / dcx // = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 * spatial_scale * src_w + (...) * 0 // = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 * spatial_scale * src_w // dgy_final / dcy = ( (h >= hctr ? 1 : -1) * gy_all - gy * sum_for_h{ (h >= hctr ? 1 : -1) } ) / gx_all^2 * spatial_scale * src_h // dgx_final / dsx = ( (w >= wctr ? 1 : -1) * gx_all - gx * sum_for_w{ (w >= wctr ? 1 : -1) } ) / gx_all^2 * ((pw + 0.5) - 0.5 * pooled_width) / pooled_width * spatial_scale * src_w * exp(dsx) + // ( 1 * gx_all - gx * sum_for_w{ 1 } ) / gx_all^2 * 1 / pooled_width * spatial_scale * src_w * exp(dsx) // dgy_final / dsy = ( (h >= hctr ? 1 : -1) * gy_all - gy * sum_for_h{ (h >= hctr ? 1 : -1) } ) / gy_all^2 * ((ph + 0.5) - 0.5 * pooled_height) / pooled_height * spatial_scale * src_h * exp(dsy) + // ( 1 * gy_all - gy * sum_for_h{ 1 } ) / gy_all^2 * 1 / pooled_height * spatial_scale * src_h * exp(dsy) // dg / dcx = dg / dgx_final * dgx_final / dcx + dg / dgy_final * dgy_final / dcx // = gy_final * dgx_final / dcx + gx_final * 0 // = gy_final * dgx_final / dcx // ... */ // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); // Define an empty pooling region to be zero Dtype val_cx = 0, val_cy = 0, val_sx = 0, val_sy = 0; Dtype gain_x = 0, gain_y = 0; Dtype pw_ = static_cast<Dtype>(pw); Dtype ph_ = static_cast<Dtype>(ph); Dtype pooled_width_ = static_cast<Dtype>(pooled_width); Dtype pooled_height_ = static_cast<Dtype>(pooled_height); bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype w_mask = 0, h_mask = 0, coeff_x = 0, coeff_y = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; Dtype w_ = w, h_ = h; gain_x = wdiff - abs((w_ - wctr)); gain_y = hdiff - abs((h_ - hctr)); w_mask = w_ >= wctr ? 1 : -1; h_mask = h_ >= hctr ? 
1 : -1; //val_cx = val_cx + gain_y / gain_y_all * (w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) / (gain_x_all*gain_x_all) * spatial_scale * src_w * top_diff[index]; //val_cy = val_cy + gain_x / gain_x_all * (h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) / (gain_y_all*gain_y_all) * spatial_scale * src_h * top_diff[index]; //val_sx = val_sx + gain_y / gain_y_all *(( gain_x_all - gain_x * dgx_final_dwdiff_all) / (gain_x_all*gain_x_all) * (pw_+0.5-0.5*pooled_width) / pooled_width * spatial_scale * src_w * exp(dsx) + // (w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) / (gain_x_all*gain_x_all) * 1 / pooled_width * spatial_scale * src_w * exp(dsx) ) * top_diff[index]; //val_sy = val_sy + gain_x / gain_x_all *(( gain_y_all - gain_y * dgy_final_dhdiff_all) / (gain_y_all*gain_y_all) * (ph_+0.5-0.5*pooled_height)/ pooled_height * spatial_scale * src_h * exp(dsy) + // (h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) / (gain_y_all*gain_y_all) * 1 / pooled_height * spatial_scale * src_h * exp(dsy) ) * top_diff[index]; //if (gain_x > 1e-10 && gain_y > 1e-10) { coeff_x = bottom_data[bottom_index] * gain_y * spatial_scale * src_w * top_diff[index]; if (gain_x_all > 1e-10) {coeff_x = coeff_x / (gain_x_all*gain_x_all);} if (gain_y_all > 1e-10) {coeff_x = coeff_x / gain_y_all;} val_cx = val_cx + (w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) * coeff_x; val_sx = val_sx + ((w_mask * gain_x_all - gain_x * dgx_final_dwctr_all ) * (pw_+0.5-0.5*pooled_width_) + ( gain_x_all - gain_x * dgx_final_dwdiff_all)) / pooled_width_ * coeff_x * exp(dst_scl_x); coeff_y = bottom_data[bottom_index] * gain_x * spatial_scale * src_h * top_diff[index]; if (gain_y_all > 1e-10) {coeff_y = coeff_y / (gain_y_all*gain_y_all);} if (gain_x_all > 1e-10) {coeff_y = coeff_y / gain_x_all;} val_cy = val_cy + (h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) * coeff_y; val_sy = val_sy + ((h_mask * gain_y_all - gain_y * dgy_final_dhctr_all ) * (ph_+0.5-0.5*pooled_height_) + ( gain_y_all - gain_y * dgy_final_dhdiff_all)) / pooled_height_ * coeff_y * exp(dst_scl_y); //} } } /*int*/ buffer_index = n * (channels * pooled_height * pooled_width * 4) + c * (pooled_height * pooled_width * 4) + ph * (pooled_width * 4) + pw * 4; bottom_diff_delta_rois_buffer[buffer_index+0] = val_cx; bottom_diff_delta_rois_buffer[buffer_index+1] = val_cy; bottom_diff_delta_rois_buffer[buffer_index+2] = val_sx; bottom_diff_delta_rois_buffer[buffer_index+3] = val_sy; //} } } extern "C" void inn_ROIWarping_updateGradInputAtomic(THCState *state, THCudaTensor *gradInput_data, THCudaTensor *data, THCudaTensor *gradInput_delta_rois, THCudaTensor *delta_rois, THCudaTensor *gradInput_delta_rois_buffer, THCudaTensor *gradOutput, THCudaTensor *top_data_buffer, THCudaTensor* rois, int W, int H, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, top_data_buffer) == 5); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, delta_rois) == 2 && delta_rois->size[1] == 5); THAssert(THCudaTensor_nDimension(state, rois) == THCudaTensor_nDimension(state, delta_rois) && rois->size[0] == delta_rois->size[0] && rois->size[1] == delta_rois->size[1]); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, top_data_buffer)); THAssert(THCudaTensor_isContiguous(state, rois)); THAssert(THCudaTensor_isContiguous(state, delta_rois)); long num_rois = rois->size[0]; long nInputPlane = data->size[1];
THCudaTensor_resizeAs(state, gradInput_data, data); THCudaTensor_zero(state, gradInput_data); THCudaTensor_resizeAs(state, gradInput_delta_rois, delta_rois); THCudaTensor_zero(state, gradInput_delta_rois); THCudaTensor_resize5d(state, gradInput_delta_rois_buffer, num_rois, nInputPlane, H, W, 4); THCudaTensor_zero(state, gradInput_delta_rois_buffer); //Backpropagation for data long count = THCudaTensor_nElement(state, gradInput_data); for (int nth_roi = 0; nth_roi < num_rois; ++nth_roi) { ROIWarpBackwardData<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS / 2, 0, THCState_getCurrentStream(state)>>>( count, THCudaTensor_data(state, top_data_buffer), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, nth_roi, THCudaTensor_data(state, rois), THCudaTensor_data(state, delta_rois), THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, gradInput_data) ); } //Backpropagation for delta_roi count = THCudaTensor_nElement(state, gradOutput); ROIWarpBackwardDeltaROI<float><<<GET_BLOCKS(count), CUDA_NUM_THREADS / 2, 0, THCState_getCurrentStream(state)>>>( count, THCudaTensor_data(state, top_data_buffer), spatial_scale, nInputPlane, data->size[2], data->size[3], H, W, THCudaTensor_data(state, rois), THCudaTensor_data(state, delta_rois), THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, data), THCudaTensor_data(state, gradInput_delta_rois_buffer) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in inn_ROIWarping_updateGradInputAtomic: %s\n", cudaGetErrorString(err)); THError("aborting"); } }
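The launches in both files depend on GET_BLOCKS, CUDA_NUM_THREADS and CUDA_KERNEL_LOOP from "common.h", which is not reproduced in this dump. A minimal sketch of the usual Caffe-style definitions is shown below; the exact thread count is an assumption, not taken from the missing header.

// Sketch of the assumed helpers behind the kernel launches (Caffe-style).
const int CUDA_NUM_THREADS = 1024;                // threads per block (assumed)

inline int GET_BLOCKS(const int n) {              // enough blocks to cover n work items
  return (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

// Grid-stride loop: each thread handles index, index + blockDim*gridDim, ...
#define CUDA_KERNEL_LOOP(i, n)                                  \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);  \
       i += blockDim.x * gridDim.x)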
cc19c17d3f66d33fb61cf3285d1fa7cbc73e803e.hip
// !!! This is a file automatically generated by hipify!!! #include "vars.h" #include "cuda_runtime.hpp" #include <hip/hip_runtime.h> extern int neuron; extern int layer; extern int batch; extern int input; extern float bias; extern int **csrdispl; extern INDPREC **csrindex; extern VALPREC **csrvalue; extern FEATPREC *currfeat; extern FEATPREC *nextfeat; extern int *active; extern int *categories; extern int *globalcategories; extern int myid; extern int numproc; extern int numthreads; extern int *numbatch; extern int *batchdispl; extern int mybatch; extern int extbatch; extern Duration timekernel; extern Duration timecopy; int **csrdispl_d; INDPREC **csrindex_d; VALPREC **csrvalue_d; int **buffdispl; int **mapdispl; int **warpdispl; MAPPREC **map; INDPREC **warpindex; VALPREC **warpvalue; int **buffdispl_d; int **mapdispl_d; int **warpdispl_d; MAPPREC *mapbuff_d; INDPREC *indbuff_d; VALPREC *valbuff_d;; #ifdef OUTOFCORE int weightsizemax; int mapsizemax; #ifdef OVERLAP MAPPREC *mapstream_d; INDPREC *indstream_d; VALPREC *valstream_d; #endif #else MAPPREC **map_d; INDPREC **warpindex_d; VALPREC **warpvalue_d; #endif FEATPREC *currfeat_d; FEATPREC *nextfeat_d; int *active_d; int *categories_d; int blocksize; int numblocks; int numwarp; int buffsize; hipEvent_t copystart, copystop; hipEvent_t kernelstart, kernelstop; hipStream_t copystream; hipStream_t kernelstream; float elapsedTime; __device__ float __ReLU(float x){ return x<0.0?0.0:x>32.0?32.0:x; }; __global__ void __launch_bounds__(256,4) dummy_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int buffsize, int *buffdispl, int *mapdispl, MAPPREC *map, int *displ, INDPREC *index, VALPREC *value, float bias , int neuron, int *categories, int *active){ extern __shared__ float shared[]; int wind = threadIdx.x%WARPSIZE; float reduce[MINIBATCH] = {0.0}; for(int buff = buffdispl[blockIdx.x]; buff < buffdispl[blockIdx.x+1]; buff++){ int mapnz = mapdispl[buff+1]-mapdispl[buff]; for(int n = threadIdx.x; n < mapnz; n += blockDim.x){ int ind = map[mapdispl[buff]+n]; for(unsigned int f = 0; f < MINIBATCH; f++) shared[f*buffsize+n] = currfeat[categories[blockIdx.y*MINIBATCH+f]* (unsigned int) neuron+ind]; } __syncthreads(); int warp = (buff*blockDim.x+threadIdx.x)/WARPSIZE; for(int m = displ[warp]; m < displ[warp+1]; m++){ int ind = index[m*WARPSIZE+wind]; float val = value[m*WARPSIZE+wind]; for(int f = 0; f < MINIBATCH; f++) reduce[f] += shared[f*buffsize+ind]*val; } __syncthreads(); } int m = blockIdx.x*blockDim.x+threadIdx.x; for(int f = 0; f < MINIBATCH; f++) if(nextfeat[(blockIdx.y*MINIBATCH+f)*neuron+m]=__ReLU(reduce[f]+bias)) atomicAdd(active+blockIdx.y*MINIBATCH+f,1); }; void setup_gpu(){ OR_FATAL(hipSetDevice(myid%6)); printf("myid %d mydevice %d\n",myid,myid%4); OR_FATAL(hipFuncSetAttribute(dummy_kernel,hipFuncAttributeMaxDynamicSharedMemorySize,98304)); if(myid==0){ int deviceCount; OR_FATAL(hipGetDeviceCount(&deviceCount)); printf("\n"); printf("Device Count: %d\n",deviceCount); int dev = 0; hipDeviceProp_t deviceProp; OR_FATAL(hipGetDeviceProperties(&deviceProp, dev)); printf("Device %d name: %s\n",dev,deviceProp.name); printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor); printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem); printf("Maximum constant memory size: %lu\n",deviceProp.totalConstMem); printf("Maximum shared memory size per block: %lu\n",deviceProp.sharedMemPerBlock); printf("Maximum block dimensions: 
%dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: %dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock); printf("Warp size: %d\n",deviceProp.warpSize); printf("\n"); } OR_FATAL(hipEventCreate(&kernelstart)); OR_FATAL(hipEventCreate(&kernelstop)); OR_FATAL(hipEventCreate(&copystart)); OR_FATAL(hipEventCreate(&copystop)); OR_FATAL(hipStreamCreate(&copystream)); OR_FATAL(hipStreamCreate(&kernelstream)); char *chartemp; chartemp = getenv("BLOCKSIZE"); blocksize = atoi(chartemp); chartemp = getenv("BUFFER"); buffsize = atoi(chartemp)*1024/sizeof(float)/MINIBATCH; numblocks = neuron/blocksize; numwarp = blocksize/WARPSIZE; if(myid==0){ printf("MINIBATCH SIZE: %d\n",MINIBATCH); printf("BLOCK SIZE: %d\n",blocksize); printf("WARP SIZE: %d\n",WARPSIZE); printf("NUM BLOCKS: %d\n",numblocks); printf("NUMWARPS: %d\n",numwarp); printf("BUFFER SIZE: %d (%f KB) PER FEATURE: %d (%f KB)\n",buffsize*MINIBATCH,buffsize*sizeof(float)/1024.0*MINIBATCH,buffsize,buffsize*sizeof(float)/1024.0); printf("\n"); } preproc(); double memother = 0.0; OR_FATAL(hipHostMalloc((void**)&globalcategories,sizeof(int)*mybatch)); OR_FATAL(hipHostMalloc((void**)&categories,sizeof(int)*mybatch)); OR_FATAL(hipHostMalloc((void**)&active,sizeof(int)*mybatch)); OR_FATAL(hipMalloc((void**)&active_d,sizeof(int)*extbatch)); OR_FATAL(hipMalloc((void**)&categories_d,sizeof(int)*extbatch)); memother += sizeof(int)*extbatch/1.0e9; memother += sizeof(int)*extbatch/1.0e9; for(int k = 0; k < mybatch; k++){ active[k] = neuron; categories[k] = k; globalcategories[k] = batchdispl[myid]+k; } OR_FATAL(hipMemset(active_d,0,sizeof(int)*extbatch)); OR_FATAL(hipMemset(categories_d,0,sizeof(int)*extbatch)); OR_FATAL(hipMemcpy(active_d,active,sizeof(int)*mybatch,hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(categories_d,categories,sizeof(int)*mybatch,hipMemcpyHostToDevice)); #ifdef OUTOFCORE if(myid==0)printf("OUT OF CORE IS ENABLED\n"); #ifdef OVERLAP if(myid==0)printf("OVERLAPPING IS ENABLED\n"); #else if(myid==0)printf("OVERLAPPING IS DISABLED\n"); #endif #else if(myid==0)printf("OUT OF CORE IS DISABLED\n"); #endif double memweight = 0.0; double memdispl = 0.0; double memmap = 0.0; buffdispl_d = new int*[layer]; mapdispl_d = new int*[layer]; warpdispl_d = new int*[layer]; #ifdef OUTOFCORE weightsizemax = 0; mapsizemax = 0; #else map_d = new MAPPREC*[layer]; warpindex_d = new INDPREC*[layer]; warpvalue_d = new VALPREC*[layer]; #endif for(int l = 0; l < layer; l++){ OR_FATAL(hipMalloc((void**)&buffdispl_d[l],sizeof(int)*(numblocks+1))); OR_FATAL(hipMalloc((void**)&mapdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]+1))); OR_FATAL(hipMalloc((void**)&warpdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1))); memdispl += sizeof(int)*(numblocks+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]*numwarp+1)/1.0e9; OR_FATAL(hipMemcpy(buffdispl_d[l],buffdispl[l],sizeof(int)*(numblocks+1),hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(mapdispl_d[l],mapdispl[l],sizeof(int)*(buffdispl[l][numblocks]+1),hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(warpdispl_d[l],warpdispl[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1),hipMemcpyHostToDevice)); #ifdef OUTOFCORE int mapsize = mapdispl[l][buffdispl[l][numblocks]]; if(mapsize > mapsizemax) mapsizemax = mapsize; int weightsize = 
warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; if(weightsize > weightsizemax) weightsizemax = weightsize; #else OR_FATAL(hipMalloc((void**)&map_d[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]]))); OR_FATAL(hipMalloc((void**)&warpindex_d[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); OR_FATAL(hipMalloc((void**)&warpvalue_d[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); memmap += sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]])/1.0e9; memweight += sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; memweight += sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; OR_FATAL(hipMemcpy(map_d[l],map[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]]),hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(warpindex_d[l],warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(warpvalue_d[l],warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice)); #endif } #ifdef OUTOFCORE if(myid==0)printf("\n"); if(myid==0)printf("mapsizemax: %d (%f KB)\n",mapsizemax,sizeof(MAPPREC)*mapsizemax/1.0e6); if(myid==0)printf("weightsizemax: %d (%f KB)\n",weightsizemax,(sizeof(INDPREC)+sizeof(VALPREC))*weightsizemax/1.0e6); #ifdef OVERLAP OR_FATAL(hipMalloc((void**)&mapstream_d,sizeof(MAPPREC)*mapsizemax*2)); OR_FATAL(hipMalloc((void**)&indstream_d,sizeof(INDPREC)*weightsizemax*2)); OR_FATAL(hipMalloc((void**)&valstream_d,sizeof(VALPREC)*weightsizemax*2)); memmap += 2*sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += 2*sizeof(INDPREC)*weightsizemax/1.0e9; memweight += 2*sizeof(VALPREC)*weightsizemax/1.0e9; OR_FATAL(hipMemcpy(mapstream_d,map[0],sizeof(MAPPREC)*mapdispl[0][buffdispl[0][numblocks]],hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(indstream_d,warpindex[0],sizeof(INDPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice)); OR_FATAL(hipMemcpy(valstream_d,warpvalue[0],sizeof(VALPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice)); #else OR_FATAL(hipMalloc((void**)&mapbuff_d,sizeof(MAPPREC)*mapsizemax)); OR_FATAL(hipMalloc((void**)&indbuff_d,sizeof(INDPREC)*weightsizemax)); OR_FATAL(hipMalloc((void**)&valbuff_d,sizeof(VALPREC)*weightsizemax)); memmap += sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += sizeof(INDPREC)*weightsizemax/1.0e9; memweight += sizeof(VALPREC)*weightsizemax/1.0e9; #endif #endif double memfeat = 0.0; fprintf(stderr, "extbatch=%d, neuron=%d\n", extbatch, neuron); { const size_t bytes = sizeof(FEATPREC) * size_t(extbatch) * size_t(neuron); fflush(stdout); fprintf(stderr, "hipMalloc %lu MB\n", bytes/1024/1024); if (hipSuccess != hipMalloc((void**)&currfeat_d,bytes)) { fprintf(stderr, "ERROR: need more GPU memory\n"); exit(EXIT_FAILURE); } fprintf(stderr, "hipMalloc %lu MB\n", bytes/1024/1024); if (hipSuccess != hipMalloc((void**)&nextfeat_d,bytes)) { fprintf(stderr, "ERROR: need more GPU memory\n"); exit(EXIT_FAILURE); } memfeat += bytes/1.0e9; memfeat += bytes/1.0e9; OR_FATAL(hipMemset(currfeat_d,0,bytes)); OR_FATAL(hipMemset(nextfeat_d,0,bytes)); OR_FATAL(hipMemcpy(currfeat_d,currfeat,bytes,hipMemcpyHostToDevice)); } double memothers[numproc]; double memweights[numproc]; double memdispls[numproc]; double memmaps[numproc]; double memfeats[numproc]; // MPI_Allgather(&memother,1,MPI_DOUBLE,memothers,1,MPI_DOUBLE,MPI_COMM_WORLD); // 
MPI_Allgather(&memweight,1,MPI_DOUBLE,memweights,1,MPI_DOUBLE,MPI_COMM_WORLD); // MPI_Allgather(&memdispl,1,MPI_DOUBLE,memdispls,1,MPI_DOUBLE,MPI_COMM_WORLD); // MPI_Allgather(&memmap,1,MPI_DOUBLE,memmaps,1,MPI_DOUBLE,MPI_COMM_WORLD); // MPI_Allgather(&memfeat,1,MPI_DOUBLE,memfeats,1,MPI_DOUBLE,MPI_COMM_WORLD); memothers[0] = memother; memweights[0] = memweight; memdispls[0] = memdispl; memmaps[0] = memmap; memfeats[0] = memfeat; if(myid==0){ double memmax = 0.0; printf("\n"); for(int p = 0; p < numproc; p++){ double memtot = memdispls[p]+memmaps[p]+memweights[p]+memfeats[p]; printf("GPU %d: OTHERS: %f DISPLS: %f MAPS: %f WEIGHTS: %f FEATURES: %f TOTAL: %f GB\n",p,memothers[p],memdispls[p],memmaps[p],memweights[p],memfeats[p],memtot); if(memtot>memmax)memmax=memtot; } printf("MAX GPU MEM: %f GB\n",memmax); } } /* Simultaneously launch the kernel and copy weights for the next layer. Two streams: kernelStream and copyStream. kernelStream contains the kernel, as well as the associated memset, and bookkeeping operations copyStream just has the copy operations for the next layer use copyStart / copyStop events to time the stream, and start/stop events to time the kernel */ void infer_gpu(int l){ /* if OUTOFCORE and OVERLAP, point at the right part of the double-buffer to get the weights from the previous iteration if OUTOFCORE and !OVERLAP, copy arguments into the kernel otherwise, just get the right layer pointers */ #ifdef OUTOFCORE #ifdef OVERLAP mapbuff_d = mapstream_d+(l%2)*mapsizemax; indbuff_d = indstream_d+(l%2)*weightsizemax; valbuff_d = valstream_d+(l%2)*weightsizemax; OR_FATAL(hipStreamSynchronize(copystream)); #else OR_FATAL(hipEventRecord(copystart,kernelstream)); int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; OR_FATAL(hipMemcpyAsync(indbuff_d,warpindex[l],sizeof(INDPREC)*weightsize,hipMemcpyHostToDevice,kernelstream)); OR_FATAL(hipMemcpyAsync(valbuff_d,warpvalue[l],sizeof(VALPREC)*weightsize,hipMemcpyHostToDevice,kernelstream)); int mapsize = mapdispl[l][buffdispl[l][numblocks]]; OR_FATAL(hipMemcpyAsync(mapbuff_d,map[l],sizeof(MAPPREC)*mapsize,hipMemcpyHostToDevice,kernelstream)); OR_FATAL(hipEventRecord(copystop,kernelstream)); #endif #else mapbuff_d = map_d[l]; indbuff_d = warpindex_d[l]; valbuff_d = warpvalue_d[l]; #endif dim3 block(blocksize); dim3 grid(numblocks,(mybatch+MINIBATCH-1)/MINIBATCH); // initialize active features in the batch OR_FATAL(hipMemsetAsync(active_d,0,sizeof(int)*mybatch,kernelstream)); OR_FATAL(hipEventRecord(kernelstart,kernelstream)); hipLaunchKernelGGL(( dummy_kernel), dim3(grid),dim3(block),sizeof(float)*buffsize*MINIBATCH,kernelstream, nextfeat_d,currfeat_d,buffsize,buffdispl_d[l],mapdispl_d[l],mapbuff_d,warpdispl_d[l],indbuff_d,valbuff_d,bias,neuron,categories_d,active_d); OR_FATAL(hipEventRecord(kernelstop,kernelstream)); OR_FATAL(hipMemcpyAsync(active,active_d,sizeof(int)*mybatch,hipMemcpyDeviceToHost,kernelstream)); #ifdef OUTOFCORE #ifdef OVERLAP if(l+1 < layer){ OR_FATAL(hipMemcpyAsync(mapstream_d+((l+1)%2)*mapsizemax,map[l+1],sizeof(MAPPREC)*mapdispl[l+1][buffdispl[l+1][numblocks]],hipMemcpyHostToDevice,copystream)); OR_FATAL(hipMemcpyAsync(indstream_d+((l+1)%2)*weightsizemax,warpindex[l+1],sizeof(INDPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice,copystream)); OR_FATAL(hipMemcpyAsync(valstream_d+((l+1)%2)*weightsizemax,warpvalue[l+1],sizeof(VALPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,hipMemcpyHostToDevice,copystream)); } #else 
OR_FATAL(hipEventElapsedTime(&elapsedTime,copystart,copystop)); timecopy += elapsedTime/1.0e3; #endif #endif OR_FATAL(hipStreamSynchronize(kernelstream)); int feature = 0; for(int k = 0; k < mybatch; k++) if(active[k]){ globalcategories[feature] = globalcategories[k]; categories[feature] = k; feature++; } mybatch = feature; OR_FATAL(hipMemcpyAsync(categories_d,categories,sizeof(int)*feature,hipMemcpyHostToDevice,kernelstream)); OR_FATAL(hipEventElapsedTime(&elapsedTime,kernelstart,kernelstop)); timekernel += std::chrono::duration<float, std::milli>(elapsedTime); FEATPREC *tempfeat_d = currfeat_d; currfeat_d = nextfeat_d; nextfeat_d = tempfeat_d; //int allfeature = 0; //MPI_Allreduce(&feature,&allfeature,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); //if(myid==0)printf("layer %d features %d\n",l,allfeature); }; void preproc(){ buffdispl = new int*[layer]; mapdispl = new int*[layer]; warpdispl = new int*[layer]; map = new MAPPREC*[layer]; warpindex = new INDPREC*[layer]; warpvalue = new VALPREC*[layer]; int totbuff = 0; int totmapnz = 0; int totwarpnz = 0; int *temptag = new int[neuron*numthreads]; for(int l = 0; l < layer; l++){ //if(myid==0)printf("preprocessing layer %d\n",l); int *numbuff = new int[numblocks]; buffdispl[l] = new int[numblocks+1]; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++){ if(temp[n]) footprint++; } numbuff[b] = (footprint+buffsize-1)/buffsize; } buffdispl[l][0] = 0; for(int b = 0; b < numblocks; b++) buffdispl[l][b+1] = buffdispl[l][b]+numbuff[b]; totbuff += buffdispl[l][numblocks]; int *warpnz = new int[buffdispl[l][numblocks]*numwarp]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]*numwarp; n++) warpnz[n] = 0; int *mapnz = new int[buffdispl[l][numblocks]]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; mapnz[buffdispl[l][b]+buff]++; temp[n] = buff; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]==buff) tempnz[t]++; int warpmax = 0; for(int t = 0; t < WARPSIZE; t++) if(tempnz[t]>warpmax) warpmax = tempnz[t]; warpnz[(buffdispl[l][b]+buff)*numwarp+warp] = warpmax; } } warpdispl[l] = new int[buffdispl[l][numblocks]*numwarp+1]; warpdispl[l][0] = 0; for(int warp = 0; warp < buffdispl[l][numblocks]*numwarp; warp++) warpdispl[l][warp+1] = warpdispl[l][warp]+warpnz[warp]; totwarpnz += warpdispl[l][buffdispl[l][numblocks]*numwarp]; OR_FATAL(hipHostMalloc((void**)&warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); OR_FATAL(hipHostMalloc((void**)&warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); #pragma omp parallel for for(int n = 0; n < 
warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; n++){ warpindex[l][n] = 0; warpvalue[l][n] = 0.0; } mapdispl[l] = new int[buffdispl[l][numblocks]+1]; mapdispl[l][0] = 0; for(int buff = 0; buff < buffdispl[l][numblocks]; buff++) mapdispl[l][buff+1] = mapdispl[l][buff] + mapnz[buff]; totmapnz += mapdispl[l][buffdispl[l][numblocks]]; OR_FATAL(hipHostMalloc((void**)&map[l],sizeof(MAPPREC)*mapdispl[l][buffdispl[l][numblocks]])); #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; map[l][mapdispl[l][buffdispl[l][b]+buff]+mapnz[buffdispl[l][b]+buff]] = n; mapnz[buffdispl[l][b]+buff]++; temp[n] = footprint; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]/buffsize==buff){ int ind = (warpdispl[l][(buffdispl[l][b]+buff)*numwarp+warp]+tempnz[t])*WARPSIZE+t; warpindex[l][ind] = temp[csrindex[l][n]]%buffsize; warpvalue[l][ind] = csrvalue[l][n]; tempnz[t]++; } } } delete[] numbuff; delete[] mapnz; delete[] warpnz; delete[] csrdispl[l]; delete[] csrindex[l]; delete[] csrvalue[l]; } delete[] temptag; delete[] csrdispl; delete[] csrindex; delete[] csrvalue; if(myid==0)printf("total buffers: %d (%f per block)\n",totbuff,totbuff/(float)layer/numblocks); if(myid==0)printf("total map: %d (%f per block)\n",totmapnz,totmapnz/(float)layer/numblocks); if(myid==0)printf("total warpnz: %d (%f per buffer)\n",totwarpnz,totwarpnz/(float)totbuff); if(myid==0)printf("iefficiency: %f\n",totwarpnz*(float)WARPSIZE/(layer*(float)neuron*32)); if(myid==0)printf("\n"); /*if(myid==0) for(int l = 0; l < 5; l++) for(int buff = 0; buff < 15; buff++) for(int warp = 0; warp < numwarp; warp++){ int nz = warpdispl[l][buff*numwarp+warp+1]-warpdispl[l][buff*numwarp+warp]; printf("Layer %d buff %d warp %d nz %d\n",l,buff,buff*numwarp+warp,nz); for(int m = warpdispl[l][buff*numwarp+warp]; m < warpdispl[l][buff*numwarp+warp+1]; m++){ for(int t = 0; t < WARPSIZE; t++) printf("%e ",__half2float(warpvalue[l][m*WARPSIZE+t])); printf("\n"); } }*/ };
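Before preproc() repacks csrdispl/csrindex/csrvalue into the blocked shared-memory layout consumed by dummy_kernel, each layer is conceptually a sparse matrix-vector product per sample, followed by the ReLU clamped to [0, 32] and a per-sample count of nonzero outputs ("active"). The CPU sketch below spells out that reference computation (illustrative only; the function and variable names are not from the file, and the MINIBATCH batching, categories remapping and shared-memory buffering in the kernel are performance details layered on top of this).

// Illustrative CPU reference for one layer of the sparse inference.
#include <vector>

static float relu_clamped(float x) { return x < 0.f ? 0.f : (x > 32.f ? 32.f : x); }

// rows = output neurons; CSR columns index input neurons (same layout as csrdispl/csrindex/csrvalue).
void infer_layer_reference(int neuron, int batch, float bias,
                           const std::vector<int>& displ,    // size neuron+1
                           const std::vector<int>& index,    // input-neuron ids
                           const std::vector<float>& value,  // weights
                           const std::vector<float>& in,     // batch x neuron
                           std::vector<float>& out,          // batch x neuron
                           std::vector<int>& active) {       // nonzero outputs per sample
  out.assign(static_cast<size_t>(batch) * neuron, 0.f);
  active.assign(batch, 0);
  for (int k = 0; k < batch; ++k)
    for (int m = 0; m < neuron; ++m) {
      float acc = 0.f;
      for (int n = displ[m]; n < displ[m + 1]; ++n)
        acc += value[n] * in[static_cast<size_t>(k) * neuron + index[n]];
      float y = relu_clamped(acc + bias);
      out[static_cast<size_t>(k) * neuron + m] = y;
      if (y != 0.f) ++active[k];  // mirrors atomicAdd(active + ..., 1) in dummy_kernel
    }
}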
cc19c17d3f66d33fb61cf3285d1fa7cbc73e803e.cu
#include "vars.h" #include "cuda_runtime.hpp" #include <cuda.h> extern int neuron; extern int layer; extern int batch; extern int input; extern float bias; extern int **csrdispl; extern INDPREC **csrindex; extern VALPREC **csrvalue; extern FEATPREC *currfeat; extern FEATPREC *nextfeat; extern int *active; extern int *categories; extern int *globalcategories; extern int myid; extern int numproc; extern int numthreads; extern int *numbatch; extern int *batchdispl; extern int mybatch; extern int extbatch; extern Duration timekernel; extern Duration timecopy; int **csrdispl_d; INDPREC **csrindex_d; VALPREC **csrvalue_d; int **buffdispl; int **mapdispl; int **warpdispl; MAPPREC **map; INDPREC **warpindex; VALPREC **warpvalue; int **buffdispl_d; int **mapdispl_d; int **warpdispl_d; MAPPREC *mapbuff_d; INDPREC *indbuff_d; VALPREC *valbuff_d;; #ifdef OUTOFCORE int weightsizemax; int mapsizemax; #ifdef OVERLAP MAPPREC *mapstream_d; INDPREC *indstream_d; VALPREC *valstream_d; #endif #else MAPPREC **map_d; INDPREC **warpindex_d; VALPREC **warpvalue_d; #endif FEATPREC *currfeat_d; FEATPREC *nextfeat_d; int *active_d; int *categories_d; int blocksize; int numblocks; int numwarp; int buffsize; cudaEvent_t copystart, copystop; cudaEvent_t kernelstart, kernelstop; cudaStream_t copystream; cudaStream_t kernelstream; float elapsedTime; __device__ float __ReLU(float x){ return x<0.0?0.0:x>32.0?32.0:x; }; __global__ void __launch_bounds__(256,4) dummy_kernel(FEATPREC *nextfeat, FEATPREC *currfeat, int buffsize, int *buffdispl, int *mapdispl, MAPPREC *map, int *displ, INDPREC *index, VALPREC *value, float bias , int neuron, int *categories, int *active){ extern __shared__ float shared[]; int wind = threadIdx.x%WARPSIZE; float reduce[MINIBATCH] = {0.0}; for(int buff = buffdispl[blockIdx.x]; buff < buffdispl[blockIdx.x+1]; buff++){ int mapnz = mapdispl[buff+1]-mapdispl[buff]; for(int n = threadIdx.x; n < mapnz; n += blockDim.x){ int ind = map[mapdispl[buff]+n]; for(unsigned int f = 0; f < MINIBATCH; f++) shared[f*buffsize+n] = currfeat[categories[blockIdx.y*MINIBATCH+f]* (unsigned int) neuron+ind]; } __syncthreads(); int warp = (buff*blockDim.x+threadIdx.x)/WARPSIZE; for(int m = displ[warp]; m < displ[warp+1]; m++){ int ind = index[m*WARPSIZE+wind]; float val = value[m*WARPSIZE+wind]; for(int f = 0; f < MINIBATCH; f++) reduce[f] += shared[f*buffsize+ind]*val; } __syncthreads(); } int m = blockIdx.x*blockDim.x+threadIdx.x; for(int f = 0; f < MINIBATCH; f++) if(nextfeat[(blockIdx.y*MINIBATCH+f)*neuron+m]=__ReLU(reduce[f]+bias)) atomicAdd(active+blockIdx.y*MINIBATCH+f,1); }; void setup_gpu(){ OR_FATAL(cudaSetDevice(myid%6)); printf("myid %d mydevice %d\n",myid,myid%4); OR_FATAL(cudaFuncSetAttribute(dummy_kernel,cudaFuncAttributeMaxDynamicSharedMemorySize,98304)); if(myid==0){ int deviceCount; OR_FATAL(cudaGetDeviceCount(&deviceCount)); printf("\n"); printf("Device Count: %d\n",deviceCount); int dev = 0; cudaDeviceProp deviceProp; OR_FATAL(cudaGetDeviceProperties(&deviceProp, dev)); printf("Device %d name: %s\n",dev,deviceProp.name); printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor); printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem); printf("Maximum constant memory size: %lu\n",deviceProp.totalConstMem); printf("Maximum shared memory size per block: %lu\n",deviceProp.sharedMemPerBlock); printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: 
%dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock); printf("Warp size: %d\n",deviceProp.warpSize); printf("\n"); } OR_FATAL(cudaEventCreate(&kernelstart)); OR_FATAL(cudaEventCreate(&kernelstop)); OR_FATAL(cudaEventCreate(&copystart)); OR_FATAL(cudaEventCreate(&copystop)); OR_FATAL(cudaStreamCreate(&copystream)); OR_FATAL(cudaStreamCreate(&kernelstream)); char *chartemp; chartemp = getenv("BLOCKSIZE"); blocksize = atoi(chartemp); chartemp = getenv("BUFFER"); buffsize = atoi(chartemp)*1024/sizeof(float)/MINIBATCH; numblocks = neuron/blocksize; numwarp = blocksize/WARPSIZE; if(myid==0){ printf("MINIBATCH SIZE: %d\n",MINIBATCH); printf("BLOCK SIZE: %d\n",blocksize); printf("WARP SIZE: %d\n",WARPSIZE); printf("NUM BLOCKS: %d\n",numblocks); printf("NUMWARPS: %d\n",numwarp); printf("BUFFER SIZE: %d (%f KB) PER FEATURE: %d (%f KB)\n",buffsize*MINIBATCH,buffsize*sizeof(float)/1024.0*MINIBATCH,buffsize,buffsize*sizeof(float)/1024.0); printf("\n"); } preproc(); double memother = 0.0; OR_FATAL(cudaMallocHost((void**)&globalcategories,sizeof(int)*mybatch)); OR_FATAL(cudaMallocHost((void**)&categories,sizeof(int)*mybatch)); OR_FATAL(cudaMallocHost((void**)&active,sizeof(int)*mybatch)); OR_FATAL(cudaMalloc((void**)&active_d,sizeof(int)*extbatch)); OR_FATAL(cudaMalloc((void**)&categories_d,sizeof(int)*extbatch)); memother += sizeof(int)*extbatch/1.0e9; memother += sizeof(int)*extbatch/1.0e9; for(int k = 0; k < mybatch; k++){ active[k] = neuron; categories[k] = k; globalcategories[k] = batchdispl[myid]+k; } OR_FATAL(cudaMemset(active_d,0,sizeof(int)*extbatch)); OR_FATAL(cudaMemset(categories_d,0,sizeof(int)*extbatch)); OR_FATAL(cudaMemcpy(active_d,active,sizeof(int)*mybatch,cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(categories_d,categories,sizeof(int)*mybatch,cudaMemcpyHostToDevice)); #ifdef OUTOFCORE if(myid==0)printf("OUT OF CORE IS ENABLED\n"); #ifdef OVERLAP if(myid==0)printf("OVERLAPPING IS ENABLED\n"); #else if(myid==0)printf("OVERLAPPING IS DISABLED\n"); #endif #else if(myid==0)printf("OUT OF CORE IS DISABLED\n"); #endif double memweight = 0.0; double memdispl = 0.0; double memmap = 0.0; buffdispl_d = new int*[layer]; mapdispl_d = new int*[layer]; warpdispl_d = new int*[layer]; #ifdef OUTOFCORE weightsizemax = 0; mapsizemax = 0; #else map_d = new MAPPREC*[layer]; warpindex_d = new INDPREC*[layer]; warpvalue_d = new VALPREC*[layer]; #endif for(int l = 0; l < layer; l++){ OR_FATAL(cudaMalloc((void**)&buffdispl_d[l],sizeof(int)*(numblocks+1))); OR_FATAL(cudaMalloc((void**)&mapdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]+1))); OR_FATAL(cudaMalloc((void**)&warpdispl_d[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1))); memdispl += sizeof(int)*(numblocks+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]+1)/1.0e9; memdispl += sizeof(int)*(buffdispl[l][numblocks]*numwarp+1)/1.0e9; OR_FATAL(cudaMemcpy(buffdispl_d[l],buffdispl[l],sizeof(int)*(numblocks+1),cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(mapdispl_d[l],mapdispl[l],sizeof(int)*(buffdispl[l][numblocks]+1),cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(warpdispl_d[l],warpdispl[l],sizeof(int)*(buffdispl[l][numblocks]*numwarp+1),cudaMemcpyHostToDevice)); #ifdef OUTOFCORE int mapsize = mapdispl[l][buffdispl[l][numblocks]]; if(mapsize > mapsizemax) mapsizemax = mapsize; int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; if(weightsize > weightsizemax) weightsizemax = weightsize; #else 
OR_FATAL(cudaMalloc((void**)&map_d[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]]))); OR_FATAL(cudaMalloc((void**)&warpindex_d[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); OR_FATAL(cudaMalloc((void**)&warpvalue_d[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); memmap += sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]])/1.0e9; memweight += sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; memweight += sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE/1.0e9; OR_FATAL(cudaMemcpy(map_d[l],map[l],sizeof(MAPPREC)*(mapdispl[l][buffdispl[l][numblocks]]),cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(warpindex_d[l],warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(warpvalue_d[l],warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice)); #endif } #ifdef OUTOFCORE if(myid==0)printf("\n"); if(myid==0)printf("mapsizemax: %d (%f KB)\n",mapsizemax,sizeof(MAPPREC)*mapsizemax/1.0e6); if(myid==0)printf("weightsizemax: %d (%f KB)\n",weightsizemax,(sizeof(INDPREC)+sizeof(VALPREC))*weightsizemax/1.0e6); #ifdef OVERLAP OR_FATAL(cudaMalloc((void**)&mapstream_d,sizeof(MAPPREC)*mapsizemax*2)); OR_FATAL(cudaMalloc((void**)&indstream_d,sizeof(INDPREC)*weightsizemax*2)); OR_FATAL(cudaMalloc((void**)&valstream_d,sizeof(VALPREC)*weightsizemax*2)); memmap += 2*sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += 2*sizeof(INDPREC)*weightsizemax/1.0e9; memweight += 2*sizeof(VALPREC)*weightsizemax/1.0e9; OR_FATAL(cudaMemcpy(mapstream_d,map[0],sizeof(MAPPREC)*mapdispl[0][buffdispl[0][numblocks]],cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(indstream_d,warpindex[0],sizeof(INDPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice)); OR_FATAL(cudaMemcpy(valstream_d,warpvalue[0],sizeof(VALPREC)*warpdispl[0][buffdispl[0][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice)); #else OR_FATAL(cudaMalloc((void**)&mapbuff_d,sizeof(MAPPREC)*mapsizemax)); OR_FATAL(cudaMalloc((void**)&indbuff_d,sizeof(INDPREC)*weightsizemax)); OR_FATAL(cudaMalloc((void**)&valbuff_d,sizeof(VALPREC)*weightsizemax)); memmap += sizeof(MAPPREC)*mapsizemax/1.0e9; memweight += sizeof(INDPREC)*weightsizemax/1.0e9; memweight += sizeof(VALPREC)*weightsizemax/1.0e9; #endif #endif double memfeat = 0.0; fprintf(stderr, "extbatch=%d, neuron=%d\n", extbatch, neuron); { const size_t bytes = sizeof(FEATPREC) * size_t(extbatch) * size_t(neuron); fflush(stdout); fprintf(stderr, "cudaMalloc %lu MB\n", bytes/1024/1024); if (cudaSuccess != cudaMalloc((void**)&currfeat_d,bytes)) { fprintf(stderr, "ERROR: need more GPU memory\n"); exit(EXIT_FAILURE); } fprintf(stderr, "cudaMalloc %lu MB\n", bytes/1024/1024); if (cudaSuccess != cudaMalloc((void**)&nextfeat_d,bytes)) { fprintf(stderr, "ERROR: need more GPU memory\n"); exit(EXIT_FAILURE); } memfeat += bytes/1.0e9; memfeat += bytes/1.0e9; OR_FATAL(cudaMemset(currfeat_d,0,bytes)); OR_FATAL(cudaMemset(nextfeat_d,0,bytes)); OR_FATAL(cudaMemcpy(currfeat_d,currfeat,bytes,cudaMemcpyHostToDevice)); } double memothers[numproc]; double memweights[numproc]; double memdispls[numproc]; double memmaps[numproc]; double memfeats[numproc]; // MPI_Allgather(&memother,1,MPI_DOUBLE,memothers,1,MPI_DOUBLE,MPI_COMM_WORLD); // MPI_Allgather(&memweight,1,MPI_DOUBLE,memweights,1,MPI_DOUBLE,MPI_COMM_WORLD); // 
MPI_Allgather(&memdispl,1,MPI_DOUBLE,memdispls,1,MPI_DOUBLE,MPI_COMM_WORLD); // MPI_Allgather(&memmap,1,MPI_DOUBLE,memmaps,1,MPI_DOUBLE,MPI_COMM_WORLD); // MPI_Allgather(&memfeat,1,MPI_DOUBLE,memfeats,1,MPI_DOUBLE,MPI_COMM_WORLD); memothers[0] = memother; memweights[0] = memweight; memdispls[0] = memdispl; memmaps[0] = memmap; memfeats[0] = memfeat; if(myid==0){ double memmax = 0.0; printf("\n"); for(int p = 0; p < numproc; p++){ double memtot = memdispls[p]+memmaps[p]+memweights[p]+memfeats[p]; printf("GPU %d: OTHERS: %f DISPLS: %f MAPS: %f WEIGHTS: %f FEATURES: %f TOTAL: %f GB\n",p,memothers[p],memdispls[p],memmaps[p],memweights[p],memfeats[p],memtot); if(memtot>memmax)memmax=memtot; } printf("MAX GPU MEM: %f GB\n",memmax); } } /* Simultaneously launch the kernel and copy weights for the next layer. Two streams: kernelStream and copyStream. kernelStream contains the kernel, as well as the associated memset, and bookkeeping operations copyStream just has the copy operations for the next layer use copyStart / copyStop events to time the stream, and start/stop events to time the kernel */ void infer_gpu(int l){ /* if OUTOFCORE and OVERLAP, point at the right part of the double-buffer to get the weights from the previous iteration if OUTOFCORE and !OVERLAP, copy arguments into the kernel otherwise, just get the right layer pointers */ #ifdef OUTOFCORE #ifdef OVERLAP mapbuff_d = mapstream_d+(l%2)*mapsizemax; indbuff_d = indstream_d+(l%2)*weightsizemax; valbuff_d = valstream_d+(l%2)*weightsizemax; OR_FATAL(cudaStreamSynchronize(copystream)); #else OR_FATAL(cudaEventRecord(copystart,kernelstream)); int weightsize = warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; OR_FATAL(cudaMemcpyAsync(indbuff_d,warpindex[l],sizeof(INDPREC)*weightsize,cudaMemcpyHostToDevice,kernelstream)); OR_FATAL(cudaMemcpyAsync(valbuff_d,warpvalue[l],sizeof(VALPREC)*weightsize,cudaMemcpyHostToDevice,kernelstream)); int mapsize = mapdispl[l][buffdispl[l][numblocks]]; OR_FATAL(cudaMemcpyAsync(mapbuff_d,map[l],sizeof(MAPPREC)*mapsize,cudaMemcpyHostToDevice,kernelstream)); OR_FATAL(cudaEventRecord(copystop,kernelstream)); #endif #else mapbuff_d = map_d[l]; indbuff_d = warpindex_d[l]; valbuff_d = warpvalue_d[l]; #endif dim3 block(blocksize); dim3 grid(numblocks,(mybatch+MINIBATCH-1)/MINIBATCH); // initialize active features in the batch OR_FATAL(cudaMemsetAsync(active_d,0,sizeof(int)*mybatch,kernelstream)); OR_FATAL(cudaEventRecord(kernelstart,kernelstream)); dummy_kernel<<<grid,block,sizeof(float)*buffsize*MINIBATCH,kernelstream>>>(nextfeat_d,currfeat_d,buffsize,buffdispl_d[l],mapdispl_d[l],mapbuff_d,warpdispl_d[l],indbuff_d,valbuff_d,bias,neuron,categories_d,active_d); OR_FATAL(cudaEventRecord(kernelstop,kernelstream)); OR_FATAL(cudaMemcpyAsync(active,active_d,sizeof(int)*mybatch,cudaMemcpyDeviceToHost,kernelstream)); #ifdef OUTOFCORE #ifdef OVERLAP if(l+1 < layer){ OR_FATAL(cudaMemcpyAsync(mapstream_d+((l+1)%2)*mapsizemax,map[l+1],sizeof(MAPPREC)*mapdispl[l+1][buffdispl[l+1][numblocks]],cudaMemcpyHostToDevice,copystream)); OR_FATAL(cudaMemcpyAsync(indstream_d+((l+1)%2)*weightsizemax,warpindex[l+1],sizeof(INDPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice,copystream)); OR_FATAL(cudaMemcpyAsync(valstream_d+((l+1)%2)*weightsizemax,warpvalue[l+1],sizeof(VALPREC)*warpdispl[l+1][buffdispl[l+1][numblocks]*numwarp]*WARPSIZE,cudaMemcpyHostToDevice,copystream)); } #else OR_FATAL(cudaEventElapsedTime(&elapsedTime,copystart,copystop)); timecopy += elapsedTime/1.0e3; #endif #endif 
OR_FATAL(cudaStreamSynchronize(kernelstream)); int feature = 0; for(int k = 0; k < mybatch; k++) if(active[k]){ globalcategories[feature] = globalcategories[k]; categories[feature] = k; feature++; } mybatch = feature; OR_FATAL(cudaMemcpyAsync(categories_d,categories,sizeof(int)*feature,cudaMemcpyHostToDevice,kernelstream)); OR_FATAL(cudaEventElapsedTime(&elapsedTime,kernelstart,kernelstop)); timekernel += std::chrono::duration<float, std::milli>(elapsedTime); FEATPREC *tempfeat_d = currfeat_d; currfeat_d = nextfeat_d; nextfeat_d = tempfeat_d; //int allfeature = 0; //MPI_Allreduce(&feature,&allfeature,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); //if(myid==0)printf("layer %d features %d\n",l,allfeature); }; void preproc(){ buffdispl = new int*[layer]; mapdispl = new int*[layer]; warpdispl = new int*[layer]; map = new MAPPREC*[layer]; warpindex = new INDPREC*[layer]; warpvalue = new VALPREC*[layer]; int totbuff = 0; int totmapnz = 0; int totwarpnz = 0; int *temptag = new int[neuron*numthreads]; for(int l = 0; l < layer; l++){ //if(myid==0)printf("preprocessing layer %d\n",l); int *numbuff = new int[numblocks]; buffdispl[l] = new int[numblocks+1]; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++){ if(temp[n]) footprint++; } numbuff[b] = (footprint+buffsize-1)/buffsize; } buffdispl[l][0] = 0; for(int b = 0; b < numblocks; b++) buffdispl[l][b+1] = buffdispl[l][b]+numbuff[b]; totbuff += buffdispl[l][numblocks]; int *warpnz = new int[buffdispl[l][numblocks]*numwarp]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]*numwarp; n++) warpnz[n] = 0; int *mapnz = new int[buffdispl[l][numblocks]]; #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; mapnz[buffdispl[l][b]+buff]++; temp[n] = buff; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]==buff) tempnz[t]++; int warpmax = 0; for(int t = 0; t < WARPSIZE; t++) if(tempnz[t]>warpmax) warpmax = tempnz[t]; warpnz[(buffdispl[l][b]+buff)*numwarp+warp] = warpmax; } } warpdispl[l] = new int[buffdispl[l][numblocks]*numwarp+1]; warpdispl[l][0] = 0; for(int warp = 0; warp < buffdispl[l][numblocks]*numwarp; warp++) warpdispl[l][warp+1] = warpdispl[l][warp]+warpnz[warp]; totwarpnz += warpdispl[l][buffdispl[l][numblocks]*numwarp]; OR_FATAL(cudaMallocHost((void**)&warpindex[l],sizeof(INDPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); OR_FATAL(cudaMallocHost((void**)&warpvalue[l],sizeof(VALPREC)*warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE)); #pragma omp parallel for for(int n = 0; n < warpdispl[l][buffdispl[l][numblocks]*numwarp]*WARPSIZE; n++){ warpindex[l][n] = 0; warpvalue[l][n] = 0.0; } mapdispl[l] 
= new int[buffdispl[l][numblocks]+1]; mapdispl[l][0] = 0; for(int buff = 0; buff < buffdispl[l][numblocks]; buff++) mapdispl[l][buff+1] = mapdispl[l][buff] + mapnz[buff]; totmapnz += mapdispl[l][buffdispl[l][numblocks]]; OR_FATAL(cudaMallocHost((void**)&map[l],sizeof(MAPPREC)*mapdispl[l][buffdispl[l][numblocks]])); #pragma omp parallel for for(int n = 0; n < buffdispl[l][numblocks]; n++) mapnz[n] = 0; #pragma omp parallel for for(int b = 0; b < numblocks; b++){ int *temp = temptag+omp_get_thread_num()*neuron; for(int n = 0; n < neuron; n++) temp[n] = 0; for(int m = b*blocksize; m < (b+1)*blocksize; m++) for(int n = csrdispl[l][m]; n < csrdispl[l][m+1]; n++) temp[csrindex[l][n]]++; int footprint = 0; for(int n = 0; n < neuron; n++) if(temp[n]){ int buff = footprint/buffsize; map[l][mapdispl[l][buffdispl[l][b]+buff]+mapnz[buffdispl[l][b]+buff]] = n; mapnz[buffdispl[l][b]+buff]++; temp[n] = footprint; footprint++; } for(int buff = 0; buff < numbuff[b]; buff++) for(int warp = 0; warp < numwarp; warp++){ int tempnz[WARPSIZE] = {0}; for(int t = 0; t < WARPSIZE; t++) for(int n = csrdispl[l][b*blocksize+warp*WARPSIZE+t]; n < csrdispl[l][b*blocksize+warp*WARPSIZE+t+1]; n++) if(temp[csrindex[l][n]]/buffsize==buff){ int ind = (warpdispl[l][(buffdispl[l][b]+buff)*numwarp+warp]+tempnz[t])*WARPSIZE+t; warpindex[l][ind] = temp[csrindex[l][n]]%buffsize; warpvalue[l][ind] = csrvalue[l][n]; tempnz[t]++; } } } delete[] numbuff; delete[] mapnz; delete[] warpnz; delete[] csrdispl[l]; delete[] csrindex[l]; delete[] csrvalue[l]; } delete[] temptag; delete[] csrdispl; delete[] csrindex; delete[] csrvalue; if(myid==0)printf("total buffers: %d (%f per block)\n",totbuff,totbuff/(float)layer/numblocks); if(myid==0)printf("total map: %d (%f per block)\n",totmapnz,totmapnz/(float)layer/numblocks); if(myid==0)printf("total warpnz: %d (%f per buffer)\n",totwarpnz,totwarpnz/(float)totbuff); if(myid==0)printf("iefficiency: %f\n",totwarpnz*(float)WARPSIZE/(layer*(float)neuron*32)); if(myid==0)printf("\n"); /*if(myid==0) for(int l = 0; l < 5; l++) for(int buff = 0; buff < 15; buff++) for(int warp = 0; warp < numwarp; warp++){ int nz = warpdispl[l][buff*numwarp+warp+1]-warpdispl[l][buff*numwarp+warp]; printf("Layer %d buff %d warp %d nz %d\n",l,buff,buff*numwarp+warp,nz); for(int m = warpdispl[l][buff*numwarp+warp]; m < warpdispl[l][buff*numwarp+warp+1]; m++){ for(int t = 0; t < WARPSIZE; t++) printf("%e ",__half2float(warpvalue[l][m*WARPSIZE+t])); printf("\n"); } }*/ };
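The infer_gpu routine in the pair above hides the upload of layer l+1's weights behind the kernel for layer l by putting the copy on copystream and the kernel on kernelstream (the OUTOFCORE/OVERLAP path). The stand-alone sketch below is not taken from this dataset; it illustrates that double-buffered copy/compute pattern with placeholder names (dummy_work, NLAYERS, NBYTES) and assumes pinned host memory so the asynchronous copy can actually overlap the kernel.

// Minimal sketch of the double-buffered copy/compute overlap used by infer_gpu.
// dummy_work, NLAYERS and NBYTES are illustrative placeholders, not dataset code.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK(call) do { cudaError_t e = (call); if (e != cudaSuccess) { \
  fprintf(stderr, "CUDA error %s at %s:%d\n", cudaGetErrorString(e), __FILE__, __LINE__); \
  exit(EXIT_FAILURE); } } while (0)

__global__ void dummy_work(const float *w, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = w[i] * 2.0f;   // stand-in for the real per-layer kernel
}

int main() {
  const int NLAYERS = 4, N = 1 << 20;
  const size_t NBYTES = N * sizeof(float);

  float *weights_h, *weights_d[2], *out_d;
  CHECK(cudaMallocHost((void**)&weights_h, NBYTES));    // pinned, so async H2D can overlap
  CHECK(cudaMalloc((void**)&weights_d[0], NBYTES));
  CHECK(cudaMalloc((void**)&weights_d[1], NBYTES));
  CHECK(cudaMalloc((void**)&out_d, NBYTES));

  cudaStream_t copystream, kernelstream;
  CHECK(cudaStreamCreate(&copystream));
  CHECK(cudaStreamCreate(&kernelstream));

  // prime the first buffer, as the setup code does for layer 0
  CHECK(cudaMemcpy(weights_d[0], weights_h, NBYTES, cudaMemcpyHostToDevice));

  for (int l = 0; l < NLAYERS; ++l) {
    CHECK(cudaStreamSynchronize(copystream));           // weights for layer l are in place
    dummy_work<<<(N + 255) / 256, 256, 0, kernelstream>>>(weights_d[l % 2], out_d, N);
    if (l + 1 < NLAYERS)                                // stage layer l+1 while layer l computes
      CHECK(cudaMemcpyAsync(weights_d[(l + 1) % 2], weights_h, NBYTES,
                            cudaMemcpyHostToDevice, copystream));
    CHECK(cudaStreamSynchronize(kernelstream));
  }

  CHECK(cudaFreeHost(weights_h));
  CHECK(cudaFree(weights_d[0])); CHECK(cudaFree(weights_d[1])); CHECK(cudaFree(out_d));
  CHECK(cudaStreamDestroy(copystream)); CHECK(cudaStreamDestroy(kernelstream));
  return 0;
}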
9b6accef6a993ff750437ad8d0dce1ac37b4d63b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void test()
{
    // unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
    // printf("cnt : %d \n",d_PointCounter[0]);
}
9b6accef6a993ff750437ad8d0dce1ac37b4d63b.cu
#include "includes.h" __global__ void test() { // unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff); // printf("cnt : %d \n",d_PointCounter[0]); }
542615298c1184f878a92b13d2ab0e68fefb4a22.hip
// !!! This is a file automatically generated by hipify!!! /* * Software License Agreement (BSD License) * * Copyright (c) 2009, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id$ * */ #ifdef _WIN32 # define NOMINMAX # define WIN32_LEAN_AND_MEAN # include <windows.h> #endif #include <pcl/pcl_exports.h> #include "pcl/cuda/sample_consensus/multi_ransac.h" #include "pcl/cuda/time_gpu.h" #include <stdio.h> #include <pcl/cuda/time_cpu.h> //CUPRINTF #include "cuPrintf.hip" namespace pcl { namespace cuda { int min_nr_in_shape = 1; ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> bool MultiRandomSampleConsensus<Storage>::computeModel (int debug_verbosity_level) { double starttime = pcl::cuda::getTime (); int counter = 0; // Warn and exit if no threshold was set if (threshold_ == DBL_MAX) { std::cerr << "[pcl::cuda::MultiRandomSampleConsensus::computeModel] No threshold set!" 
<< std::endl; return (false); } // compute number of points int nr_points = sac_model_->getIndices ()->size (); int nr_remaining_points = nr_points; //std::cerr << "nr_points = " << nr_points << std::endl; // number of total iterations unsigned int cur_iteration = 0; // number of valid iterations int valid_iterations = 0; // each batch has a vector of plane coefficients (float4) std::vector<Hypotheses> h(max_batches_); std::vector<typename Storage<int>::type> h_samples (max_batches_); std::vector<float3> centroids (max_batches_ * iterations_per_batch_); // current batch number int cur_batch = 0; //// stencil vector that holds the current inliers std::vector<IndicesPtr> hypotheses_inliers_stencils (max_batches_ * iterations_per_batch_); std::vector<int> hypotheses_inlier_count (max_batches_ * iterations_per_batch_); // initialize some things all_inliers_.clear (); all_model_coefficients_.clear (); all_model_centroids_.clear (); int n_inliers_count = 0; int n_best_inliers_count = 0; int good_coeff = -1; float k = max_batches_ * iterations_per_batch_; //thrust::host_vector<float3> host_points = sac_model_->getInputCloud()->points; //std::cerr << "Input Points:" << std::endl; //for (unsigned int print_iter = 0; print_iter < nr_points; ++print_iter) //{ // std::cerr << print_iter << " : [ " // << host_points[print_iter].x << ", " // << host_points[print_iter].y << ", " // << host_points[print_iter].z << " ]" << std::endl; //} std::vector<bool> hypothesis_valid (max_batches_ * iterations_per_batch_, true); ScopeTimeCPU t ("ALLLLLLLLLLL"); do // multiple models .. { double now = pcl::cuda::getTime (); if ((now - starttime) > 1) { std::cout << "SLOW FRAME " << counter++ << std::endl; starttime = now; } thrust::host_vector<int> host_samples; thrust::host_vector<float4> host_coeffs; // make sure that sac_model_->indices_ only contains remaining point indices sac_model_->getIndices (); // generate a new batch of hypotheses { ScopeTimeCPU t ("generateModelHypotheses"); sac_model_->generateModelHypotheses (h[cur_batch], h_samples[cur_batch], iterations_per_batch_); } host_samples = h_samples[cur_batch]; host_coeffs = h[cur_batch]; if (debug_verbosity_level > 1) { std::cerr << "Samples / Coefficients:" << std::endl; for (unsigned int print_iter = 0; print_iter < iterations_per_batch_; ++print_iter) { std::cerr << host_samples[print_iter] << " : [ " << host_coeffs[print_iter].x << ", " << host_coeffs[print_iter].y << ", " << host_coeffs[print_iter].z << ", " << host_coeffs[print_iter].w << std::endl; } } // evaluate each hypothesis in this batch { ScopeTimeCPU t ("evaluate"); for (unsigned int i = 0; i < iterations_per_batch_; i++, cur_iteration ++, valid_iterations ++) { // hypothesis could be invalid because it's initial sample point was inlier previously if (!hypothesis_valid[cur_batch * iterations_per_batch_ + i]) { hypotheses_inlier_count[cur_iteration] = 0; valid_iterations --; continue; } // compute inliers for each model IndicesPtr inl_stencil; { ScopeTimeCPU t ("selectWithinDistance"); n_inliers_count = sac_model_->selectWithinDistance (h[cur_batch], i, threshold_, inl_stencil, centroids[cur_iteration]); } // store inliers and inlier count if (n_inliers_count < min_nr_in_shape) { n_inliers_count = 0; inl_stencil.reset (); // release stencil hypothesis_valid[cur_batch * iterations_per_batch_ + i] = false; } hypotheses_inliers_stencils[cur_iteration] = inl_stencil; hypotheses_inlier_count[cur_iteration] = n_inliers_count; // Better match ? 
if (n_inliers_count > n_best_inliers_count) { n_best_inliers_count = n_inliers_count; good_coeff = cur_iteration; // Compute the k parameter (k=::log(z)/::log(1-w^n)) float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points); float p_no_outliers = 1.0f - pow (w, 1.0f); p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0. if (p_no_outliers == 1.0f) k++; else k = std::log (1.0f - probability_) / std::log (p_no_outliers); } //fprintf (stderr, "[pcl::cuda::MultiRandomSampleConsensus::computeModel] Trial %d out of %f: %d inliers (best is: %d so far).\n", // cur_iteration, k, n_inliers_count, n_best_inliers_count); // check if we found a valid model { ScopeTimeCPU t("extracmodel"); if (valid_iterations >= k) { unsigned int extracted_model = good_coeff; //int nr_remaining_points_before_delete = nr_remaining_points; bool find_no_better = false; nr_remaining_points = sac_model_->deleteIndices (hypotheses_inliers_stencils[extracted_model]); //if (nr_remaining_points != nr_remaining_points_before_delete) { // Compute the k parameter (k=::log(z)/::log(1-w^n)) float w = (float)((float)min_nr_in_shape / (float)nr_remaining_points); float p_no_outliers = 1.0f - pow (w, 1.0f); p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0. if (p_no_outliers != 1.0f) { if (std::log (1.0f - probability_) / std::log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore... find_no_better = true; else if (debug_verbosity_level > 1) std::cerr << "------->" << std::log (1.0f - probability_) / std::log (p_no_outliers) << " -vs- " << valid_iterations << std::endl; } } std::cerr << "found model: " << n_best_inliers_count << ", points remaining: " << nr_remaining_points << " after " << valid_iterations << " / " << cur_iteration << " iterations" << std::endl; all_inliers_.push_back (hypotheses_inliers_stencils[extracted_model]); all_inlier_counts_.push_back (n_best_inliers_count); all_model_centroids_.push_back (centroids [extracted_model]); thrust::host_vector<float4> host_coeffs_extracted_model = h [extracted_model / iterations_per_batch_]; all_model_coefficients_.push_back (host_coeffs_extracted_model [extracted_model % iterations_per_batch_]); // so we only get it once: hypothesis_valid[extracted_model] = false; if (nr_remaining_points < (1.0-min_coverage_percent_) * nr_points) { std::cerr << "batches: " << cur_batch << ", valid iterations: " << valid_iterations << ", remaining points:" << nr_remaining_points << std::endl; return true; } if (find_no_better) { std::cerr << "will find no better model! batches: " << cur_batch << ", valid iterations: " << valid_iterations << ", remaining points:" << nr_remaining_points << std::endl; return true; } n_best_inliers_count = 0; k = max_batches_ * iterations_per_batch_; // go through all other models, invalidating those whose samples are inliers to the best one int counter_invalid = 0; int counter_inliers = 0; //for (unsigned int b = 0; b <= cur_batch; b++) unsigned int b = cur_batch; for (unsigned int j = 0; j < iterations_per_batch_; j++) { // todo: precheck for very similar models // if (h[best_coeff] - h[]) // <---- copies from device! slow! 
// continue; if (!hypothesis_valid[b * iterations_per_batch_ + j]) { //std::cerr << "model " << j << " in batch " << b <<" is an invalid model" << std::endl; counter_invalid ++; continue; } if ((b*iterations_per_batch_ + j) == extracted_model) { if (debug_verbosity_level > 1) std::cerr << "model " << j << " in batch " << b <<" is being extracted..." << std::endl; continue; } if (sac_model_->isSampleInlier (hypotheses_inliers_stencils[extracted_model], h_samples[b], j)) { //std::cerr << "sample point for model " << j << " in batch " << b <<" is inlier to best model " << extracted_model << std::endl; counter_inliers ++; hypothesis_valid[b * iterations_per_batch_ + j] = false; hypotheses_inlier_count[b*iterations_per_batch_ + j] = 0; if (j <= i) valid_iterations --; } else if (j <= i) // if it is not inlier to the best model, we subtract the inliers of the extracted model { //todo: recompute inliers... / deleteindices // ... determine best remaining model int old_score = hypotheses_inlier_count[b*iterations_per_batch_ + j]; if (old_score != 0) { //std::cerr << "inliers for model " << b*iterations_per_batch_ + j << " : " << old_score; n_inliers_count = sac_model_->deleteIndices (h[b], j, hypotheses_inliers_stencils[b*iterations_per_batch_ + j], hypotheses_inliers_stencils[extracted_model]); hypotheses_inlier_count[b*iterations_per_batch_ + j] = n_inliers_count; //std::cerr << " ---> " << hypotheses_inlier_count[b * iterations_per_batch_ + j] << std::endl; } // Better match ? if (n_inliers_count > n_best_inliers_count) { n_best_inliers_count = n_inliers_count; good_coeff = b * iterations_per_batch_ + j; // Compute the k parameter (k=::log(z)/::log(1-w^n)) float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points); float p_no_outliers = 1.0f - pow (w, 1.0f); p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0. if (p_no_outliers == 1.0f) k++; else k = std::log (1.0f - probability_) / std::log (p_no_outliers); } } } //std::cerr << "invalid models: " << counter_invalid << " , inlier models: " << counter_inliers << std::endl; } } } } // one batch done, go to next cur_batch ++; } while (cur_iteration < max_batches_ * iterations_per_batch_); // coming here means we did all 5000 iterations.. // let's find out why it took so long. //std::cerr << "Inlier indices:" << std::endl; //thrust::host_vector<int> best_inlier_indices = *all_inliers_[0]; //for (unsigned int ii = 0; ii < best_inlier_indices.size (); ++ii) // std::cout << best_inlier_indices[ii] << std::endl; //std::cout << "Samples / Coefficients:" << std::endl; //for (unsigned int batch_iter = 0; batch_iter < max_batches_; ++batch_iter) //{ // thrust::host_vector<int> host_samples = h_samples[batch_iter]; // thrust::host_vector<float4> host_coeffs = h[batch_iter]; // for (unsigned int print_iter = 0; print_iter < iterations_per_batch_; ++print_iter) // { // std::cout << host_samples[print_iter] << " : [ " // << host_coeffs[print_iter].x << ", " // << host_coeffs[print_iter].y << ", " // << host_coeffs[print_iter].z << ", " // << host_coeffs[print_iter].w << " -- " << (hypothesis_valid[batch_iter * iterations_per_batch_ + print_iter]?"ISVALID":"INVALID") << std::endl; // } //} return (false); } template class PCL_EXPORTS MultiRandomSampleConsensus<Device>; template class PCL_EXPORTS MultiRandomSampleConsensus<Host>; } // namespace } // namespace
542615298c1184f878a92b13d2ab0e68fefb4a22.cu
/* * Software License Agreement (BSD License) * * Copyright (c) 2009, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id$ * */ #ifdef _WIN32 # define NOMINMAX # define WIN32_LEAN_AND_MEAN # include <windows.h> #endif #include <pcl/pcl_exports.h> #include "pcl/cuda/sample_consensus/multi_ransac.h" #include "pcl/cuda/time_gpu.h" #include <stdio.h> #include <pcl/cuda/time_cpu.h> //CUPRINTF #include "cuPrintf.cu" namespace pcl { namespace cuda { int min_nr_in_shape = 1; ////////////////////////////////////////////////////////////////////////// template <template <typename> class Storage> bool MultiRandomSampleConsensus<Storage>::computeModel (int debug_verbosity_level) { double starttime = pcl::cuda::getTime (); int counter = 0; // Warn and exit if no threshold was set if (threshold_ == DBL_MAX) { std::cerr << "[pcl::cuda::MultiRandomSampleConsensus::computeModel] No threshold set!" 
<< std::endl; return (false); } // compute number of points int nr_points = sac_model_->getIndices ()->size (); int nr_remaining_points = nr_points; //std::cerr << "nr_points = " << nr_points << std::endl; // number of total iterations unsigned int cur_iteration = 0; // number of valid iterations int valid_iterations = 0; // each batch has a vector of plane coefficients (float4) std::vector<Hypotheses> h(max_batches_); std::vector<typename Storage<int>::type> h_samples (max_batches_); std::vector<float3> centroids (max_batches_ * iterations_per_batch_); // current batch number int cur_batch = 0; //// stencil vector that holds the current inliers std::vector<IndicesPtr> hypotheses_inliers_stencils (max_batches_ * iterations_per_batch_); std::vector<int> hypotheses_inlier_count (max_batches_ * iterations_per_batch_); // initialize some things all_inliers_.clear (); all_model_coefficients_.clear (); all_model_centroids_.clear (); int n_inliers_count = 0; int n_best_inliers_count = 0; int good_coeff = -1; float k = max_batches_ * iterations_per_batch_; //thrust::host_vector<float3> host_points = sac_model_->getInputCloud()->points; //std::cerr << "Input Points:" << std::endl; //for (unsigned int print_iter = 0; print_iter < nr_points; ++print_iter) //{ // std::cerr << print_iter << " : [ " // << host_points[print_iter].x << ", " // << host_points[print_iter].y << ", " // << host_points[print_iter].z << " ]" << std::endl; //} std::vector<bool> hypothesis_valid (max_batches_ * iterations_per_batch_, true); ScopeTimeCPU t ("ALLLLLLLLLLL"); do // multiple models .. { double now = pcl::cuda::getTime (); if ((now - starttime) > 1) { std::cout << "SLOW FRAME " << counter++ << std::endl; starttime = now; } thrust::host_vector<int> host_samples; thrust::host_vector<float4> host_coeffs; // make sure that sac_model_->indices_ only contains remaining point indices sac_model_->getIndices (); // generate a new batch of hypotheses { ScopeTimeCPU t ("generateModelHypotheses"); sac_model_->generateModelHypotheses (h[cur_batch], h_samples[cur_batch], iterations_per_batch_); } host_samples = h_samples[cur_batch]; host_coeffs = h[cur_batch]; if (debug_verbosity_level > 1) { std::cerr << "Samples / Coefficients:" << std::endl; for (unsigned int print_iter = 0; print_iter < iterations_per_batch_; ++print_iter) { std::cerr << host_samples[print_iter] << " : [ " << host_coeffs[print_iter].x << ", " << host_coeffs[print_iter].y << ", " << host_coeffs[print_iter].z << ", " << host_coeffs[print_iter].w << std::endl; } } // evaluate each hypothesis in this batch { ScopeTimeCPU t ("evaluate"); for (unsigned int i = 0; i < iterations_per_batch_; i++, cur_iteration ++, valid_iterations ++) { // hypothesis could be invalid because it's initial sample point was inlier previously if (!hypothesis_valid[cur_batch * iterations_per_batch_ + i]) { hypotheses_inlier_count[cur_iteration] = 0; valid_iterations --; continue; } // compute inliers for each model IndicesPtr inl_stencil; { ScopeTimeCPU t ("selectWithinDistance"); n_inliers_count = sac_model_->selectWithinDistance (h[cur_batch], i, threshold_, inl_stencil, centroids[cur_iteration]); } // store inliers and inlier count if (n_inliers_count < min_nr_in_shape) { n_inliers_count = 0; inl_stencil.reset (); // release stencil hypothesis_valid[cur_batch * iterations_per_batch_ + i] = false; } hypotheses_inliers_stencils[cur_iteration] = inl_stencil; hypotheses_inlier_count[cur_iteration] = n_inliers_count; // Better match ? 
if (n_inliers_count > n_best_inliers_count) { n_best_inliers_count = n_inliers_count; good_coeff = cur_iteration; // Compute the k parameter (k=std::log(z)/std::log(1-w^n)) float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points); float p_no_outliers = 1.0f - pow (w, 1.0f); p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0. if (p_no_outliers == 1.0f) k++; else k = std::log (1.0f - probability_) / std::log (p_no_outliers); } //fprintf (stderr, "[pcl::cuda::MultiRandomSampleConsensus::computeModel] Trial %d out of %f: %d inliers (best is: %d so far).\n", // cur_iteration, k, n_inliers_count, n_best_inliers_count); // check if we found a valid model { ScopeTimeCPU t("extracmodel"); if (valid_iterations >= k) { unsigned int extracted_model = good_coeff; //int nr_remaining_points_before_delete = nr_remaining_points; bool find_no_better = false; nr_remaining_points = sac_model_->deleteIndices (hypotheses_inliers_stencils[extracted_model]); //if (nr_remaining_points != nr_remaining_points_before_delete) { // Compute the k parameter (k=std::log(z)/std::log(1-w^n)) float w = (float)((float)min_nr_in_shape / (float)nr_remaining_points); float p_no_outliers = 1.0f - pow (w, 1.0f); p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0. if (p_no_outliers != 1.0f) { if (std::log (1.0f - probability_) / std::log (p_no_outliers) < valid_iterations) // we won't find a model with min_nr_in_shape points anymore... find_no_better = true; else if (debug_verbosity_level > 1) std::cerr << "------->" << std::log (1.0f - probability_) / std::log (p_no_outliers) << " -vs- " << valid_iterations << std::endl; } } std::cerr << "found model: " << n_best_inliers_count << ", points remaining: " << nr_remaining_points << " after " << valid_iterations << " / " << cur_iteration << " iterations" << std::endl; all_inliers_.push_back (hypotheses_inliers_stencils[extracted_model]); all_inlier_counts_.push_back (n_best_inliers_count); all_model_centroids_.push_back (centroids [extracted_model]); thrust::host_vector<float4> host_coeffs_extracted_model = h [extracted_model / iterations_per_batch_]; all_model_coefficients_.push_back (host_coeffs_extracted_model [extracted_model % iterations_per_batch_]); // so we only get it once: hypothesis_valid[extracted_model] = false; if (nr_remaining_points < (1.0-min_coverage_percent_) * nr_points) { std::cerr << "batches: " << cur_batch << ", valid iterations: " << valid_iterations << ", remaining points:" << nr_remaining_points << std::endl; return true; } if (find_no_better) { std::cerr << "will find no better model! batches: " << cur_batch << ", valid iterations: " << valid_iterations << ", remaining points:" << nr_remaining_points << std::endl; return true; } n_best_inliers_count = 0; k = max_batches_ * iterations_per_batch_; // go through all other models, invalidating those whose samples are inliers to the best one int counter_invalid = 0; int counter_inliers = 0; //for (unsigned int b = 0; b <= cur_batch; b++) unsigned int b = cur_batch; for (unsigned int j = 0; j < iterations_per_batch_; j++) { // todo: precheck for very similar models // if (h[best_coeff] - h[]) // <---- copies from device! slow! 
// continue; if (!hypothesis_valid[b * iterations_per_batch_ + j]) { //std::cerr << "model " << j << " in batch " << b <<" is an invalid model" << std::endl; counter_invalid ++; continue; } if ((b*iterations_per_batch_ + j) == extracted_model) { if (debug_verbosity_level > 1) std::cerr << "model " << j << " in batch " << b <<" is being extracted..." << std::endl; continue; } if (sac_model_->isSampleInlier (hypotheses_inliers_stencils[extracted_model], h_samples[b], j)) { //std::cerr << "sample point for model " << j << " in batch " << b <<" is inlier to best model " << extracted_model << std::endl; counter_inliers ++; hypothesis_valid[b * iterations_per_batch_ + j] = false; hypotheses_inlier_count[b*iterations_per_batch_ + j] = 0; if (j <= i) valid_iterations --; } else if (j <= i) // if it is not inlier to the best model, we subtract the inliers of the extracted model { //todo: recompute inliers... / deleteindices // ... determine best remaining model int old_score = hypotheses_inlier_count[b*iterations_per_batch_ + j]; if (old_score != 0) { //std::cerr << "inliers for model " << b*iterations_per_batch_ + j << " : " << old_score; n_inliers_count = sac_model_->deleteIndices (h[b], j, hypotheses_inliers_stencils[b*iterations_per_batch_ + j], hypotheses_inliers_stencils[extracted_model]); hypotheses_inlier_count[b*iterations_per_batch_ + j] = n_inliers_count; //std::cerr << " ---> " << hypotheses_inlier_count[b * iterations_per_batch_ + j] << std::endl; } // Better match ? if (n_inliers_count > n_best_inliers_count) { n_best_inliers_count = n_inliers_count; good_coeff = b * iterations_per_batch_ + j; // Compute the k parameter (k=std::log(z)/std::log(1-w^n)) float w = (float)((float)n_best_inliers_count / (float)nr_remaining_points); float p_no_outliers = 1.0f - pow (w, 1.0f); p_no_outliers = (std::max) (std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by -Inf p_no_outliers = (std::min) (1.0f - std::numeric_limits<float>::epsilon (), p_no_outliers); // Avoid division by 0. if (p_no_outliers == 1.0f) k++; else k = std::log (1.0f - probability_) / std::log (p_no_outliers); } } } //std::cerr << "invalid models: " << counter_invalid << " , inlier models: " << counter_inliers << std::endl; } } } } // one batch done, go to next cur_batch ++; } while (cur_iteration < max_batches_ * iterations_per_batch_); // coming here means we did all 5000 iterations.. // let's find out why it took so long. //std::cerr << "Inlier indices:" << std::endl; //thrust::host_vector<int> best_inlier_indices = *all_inliers_[0]; //for (unsigned int ii = 0; ii < best_inlier_indices.size (); ++ii) // std::cout << best_inlier_indices[ii] << std::endl; //std::cout << "Samples / Coefficients:" << std::endl; //for (unsigned int batch_iter = 0; batch_iter < max_batches_; ++batch_iter) //{ // thrust::host_vector<int> host_samples = h_samples[batch_iter]; // thrust::host_vector<float4> host_coeffs = h[batch_iter]; // for (unsigned int print_iter = 0; print_iter < iterations_per_batch_; ++print_iter) // { // std::cout << host_samples[print_iter] << " : [ " // << host_coeffs[print_iter].x << ", " // << host_coeffs[print_iter].y << ", " // << host_coeffs[print_iter].z << ", " // << host_coeffs[print_iter].w << " -- " << (hypothesis_valid[batch_iter * iterations_per_batch_ + print_iter]?"ISVALID":"INVALID") << std::endl; // } //} return (false); } template class PCL_EXPORTS MultiRandomSampleConsensus<Device>; template class PCL_EXPORTS MultiRandomSampleConsensus<Host>; } // namespace } // namespace
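Both versions of computeModel above re-estimate the number of RANSAC trials as k = log(1 - probability) / log(p_no_outliers), with p_no_outliers clamped into (eps, 1 - eps) before the logarithm is taken. The host-only sketch below reproduces just that update with one worked number; the helper name ransac_k and the 600-of-1000 inlier figures are illustrative, and the sample size is fixed to 1 to match the pow(w, 1.0f) in the source.

// Stand-alone sketch of the adaptive trial-count update used in computeModel().
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>

static float ransac_k(int n_inliers, int n_points, float probability) {
  float w = (float)n_inliers / (float)n_points;                 // inlier ratio
  float p_no_outliers = 1.0f - std::pow(w, 1.0f);               // sample size n = 1 here
  p_no_outliers = std::max(std::numeric_limits<float>::epsilon(), p_no_outliers);        // avoid log(0)
  p_no_outliers = std::min(1.0f - std::numeric_limits<float>::epsilon(), p_no_outliers); // avoid division by 0
  return std::log(1.0f - probability) / std::log(p_no_outliers);
}

int main() {
  // e.g. 600 of 1000 remaining points are inliers to the best plane so far, probability = 0.99:
  printf("k = %f trials\n", ransac_k(600, 1000, 0.99f));        // prints roughly k = 5.0
  return 0;
}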
61327172148b37276eb61a323abcce048991b35d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022 -2023, NVIDIA CORPORATION. All rights reserved. * * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <string> #include <vector> #include <stdio.h> #include <stdlib.h> #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) || defined(_MSC_VER) #define WINDOWS_LEAN_AND_MEAN #define NOMINMAX #include <windows.h> #include "getopt.h" #pragma warning(disable : 4819) const std::string separator = "\\"; #else #include <getopt.h> const std::string separator = "/"; #endif #include <fstream> #include <iostream> #include <chrono> #include <hip/hip_runtime.h> #include <nvtiff.h> using perfclock = std::chrono::high_resolution_clock; #define DIV_UP(a, b) (((a) + ((b)-1)) / (b)) #define CHECK_CUDA(call) \ { \ hipError_t err = call; \ if (hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", __FILE__, __LINE__, hipGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } #define CHECK_NVTIFF(call) \ { \ nvtiffStatus_t _e = (call); \ if (_e != NVTIFF_STATUS_SUCCESS) { \ fprintf(stderr, "nvtiff error code %d in file '%s' in line %i\n", _e, __FILE__, __LINE__); \ exit(EXIT_FAILURE); \ } \ } //#define LIBTIFF_TEST #ifdef LIBTIFF_TEST #include <tiffio.h> #endif #define MAX_STR_LEN (256) void write_pnm(const char* filename, unsigned char* chan, uint32_t ld, uint32_t width, uint32_t height, uint32_t BPP, uint32_t numcomp, uint32_t write_out_numcomp) { std::ofstream rOutputStream(filename); if (!rOutputStream) { std::cerr << "Cannot open output file: " << filename << std::endl; return; } if (numcomp == 1) { rOutputStream << "P5\n"; } else { rOutputStream << "P6\n"; } rOutputStream << "#nvTIFF\n"; rOutputStream << width << " " << height << "\n"; rOutputStream << (1 << BPP) - 1 << "\n"; if (BPP == 8) { for (uint32_t y = 0; y < height; y++) { for (uint32_t x = 0; x < width; x++) { for (uint32_t c = 0; c < write_out_numcomp; c++) { rOutputStream << chan[(y * ld + x) * numcomp + c]; } } } } else { uint16_t* chan16 = reinterpret_cast<uint16_t*>(chan); for (uint32_t y = 0; y < height; y++) { for (uint32_t x = 0; x < width; x++) { for (uint32_t c = 0; c < write_out_numcomp; c++) { uint32_t pix_val = chan16[(y * ld + x) * numcomp + c]; rOutputStream << static_cast<unsigned char>((pix_val) >> 8) << static_cast<unsigned char>((pix_val)&0xff); } } } } return; } int write_image(std::string input_filename, nvtiffImageInfo& image_info, unsigned char* chan, uint32_t image_id) { // Get the file name, 
without extension. // This will be used to rename the output file. size_t position = input_filename.rfind(separator); std::string out_filename = (std::string::npos == position) ? input_filename : input_filename.substr(position + 1, input_filename.size()); position = out_filename.rfind("."); out_filename = (std::string::npos == position) ? out_filename : out_filename.substr(0, position); out_filename += "_nvtiff_out_" + std::to_string(image_id); uint32_t num_samples = image_info.samples_per_pixel; uint32_t samples_written_to_file = image_info.samples_per_pixel; if (image_info.samples_per_pixel == 3 || image_info.samples_per_pixel == 4 || image_info.photometric_int == NVTIFF_PHOTOMETRIC_PALETTE) { out_filename += ".ppm"; samples_written_to_file = 3; } else if (image_info.samples_per_pixel == 1) { out_filename += ".pgm"; } else { printf("Unable to write image with %d samples per pixel, continuing to the next image..\n", image_info.samples_per_pixel); return EXIT_SUCCESS; } uint32_t bits_per_sample = image_info.bits_per_sample[0]; if (image_info.photometric_int == NVTIFF_PHOTOMETRIC_PALETTE) { num_samples = 3; samples_written_to_file = 3; bits_per_sample = 16; } if (bits_per_sample == 16 || bits_per_sample == 8) { write_pnm(out_filename.c_str(), chan, image_info.image_width, image_info.image_width, image_info.image_height, bits_per_sample, num_samples, samples_written_to_file); } else { printf("Unable to write to file for this set of tiff image, continuing to next image\n"); } return EXIT_SUCCESS; } static void usage(const char *pname) { fprintf(stdout, "Usage:\n" "%s [options] -f|--file <TIFF_FILE>\n" "\n" "General options:\n" "\n" "\t-d DEVICE_ID\n" "\t--device DEVICE_ID\n" "\t\tSpecifies the GPU to use for images decoding/encoding.\n" "\t\tDefault: device 0 is used.\n" "\n" "\t-v\n" "\t--verbose\n" "\t\tPrints some information about the decoded TIFF file.\n" "\n" "\t-h\n" "\t--help\n" "\t\tPrints this help\n" "\n" "Decoding options:\n" "\n" "\t-f TIFF_FILE\n" "\t--file TIFF_FILE\n" "\t\tSpecifies the TIFF file to decode. The code supports both single and multi-image\n" "\t\ttiff files with the following limitations: \n" "\t\t * color space must be either Grayscale (PhotometricInterp.=1) or RGB (=2) \n" "\t\t * image data compressed with LZW (Compression=5) or uncompressed \n" "\t\t * pixel components stored in \"chunky\" format (RGB..., PlanarConfiguration=1)\n" "\t\t for RGB images \n" "\t\t * image data must be organized in Strips, not Tiles \n" "\t\t * pixels of RGB images must be represented with at most 4 components \n" "\t\t * each component must be represented exactly with: \n" "\t\t * 8 bits for LZW compressed images \n" "\t\t * 8, 16 or 32 bits for uncompressed images \n" "\t\t * all images in the file must have the same properties \n" "\n" "\t-b BEG_FRM\n" "\t--frame-beg BEG_FRM\n" "\t\tSpecifies the image id in the input TIFF file to start decoding from. The image\n" "\t\tid must be a value between 0 and the total number of images in the file minus 1.\n" "\t\tValues less than 0 are clamped to 0.\n" "\t\tDefault: 0\n" "\n" "\t-e END_FRM\n" "\t--frame-end END_FRM\n" "\t\tSpecifies the image id in the input TIFF file to stop decoding at (included).\n" "\t\tThe image id must be a value between 0 and the total number of images in the\n" "\t\tfile minus 1. Values greater than num_images-1 are clamped to num_images-1.\n" "\t\tDefault: num_images-1.\n" "\n" "\t-m\n" "\t--memtype TYPE\n" "\t\tSpecifies the type of memory used to hold the TIFF file content: pinned or\n" "\t\tpageable. 
Pinned memory is used if 'p' is specified. Pageable memory is used if\n" "\t\t'r' is specified. In case of pinned memory, file content is not copied to\n" "\t\tdevice memory before the decoding process (with a resulting performance impact)\n" "\t\tunless the option -c is also specified (see below).\n" "\t\tDefualt: r (pageable)\n" "\n" "\t-c\n" "\t--copyh2d\n" "\t\tSpecifies to copy the file data to device memory in case the -m option specifies\n" "\t\tto use pinned memory. In case of pageable memory this option has no effect.\n" "\t\tDefault: off.\n" "\n" "\t--decode-out NUM_OUT\n" "\t\tEnables the writing of selected images from the decoded input TIFF file into\n" "\t\tseparate BMP files for inspection. If no argument is passed, only the first\n" "\t\timage is written to disk, otherwise the first NUM_OUT images are written.\n" "\t\tOutput files are named outImage_0.bmp, outImage_1.bmp...\n" "\t\tDefualt: disabled.\n" "\n" "Encoding options:\n" "\n" "\t-E\n" "\t--encode\n" "\t\tThis option enables the encoding of the raster images obtained by decoding the\n" "\t\tinput TIFF file. The images are divided into strips, compressed with LZW and,\n" "\t\toptionally, written into an output TIFF file.\n" "\t\tDefault: disabled.\n" "\n" "\t-r\n" "\t--rowsxstrip\n" "\t\tSpecifies the number of consecutive rows to use to divide the images into\n" "\t\tstrips. Each image is divided in strips of the same size (except possibly the\n" "\t\tlast strip) and then the strips are compressed as independent byte streams.\n" "\t\tThis option is ignored if -E is not specified.\n" "\t\tDefault: 1.\n" "\n" "\t-s\n" "\t--stripalloc\n" "\t\tSpecifies the initial estimate of the maximum size of compressed strips. If\n" "\t\tduring compression one or more strips require more space, the compression is\n" "\t\taborted and restarted automatically with a safe estimate. 
\n" "\t\tThis option is ignored if -E is not specified.\n" "\t\tDefault: the size, in bytes, of a strip in the uncompressed images.\n" "\n" "\t--encode-out\n" "\t\tEnables the writing of the compressed images to an output TIFF file named\n" "\t\toutFile.tif.\n" "\t\tDefualt: disabled.\n", pname); exit(EXIT_FAILURE); } bool check_identical(nvtiffImageInfo_t * image_info, uint32_t num_images) { bool identical = true; // now check that all subfiles have the same properties for(unsigned int i = 1; i < num_images; i++) { if ((image_info[i].image_width != image_info[i -1].image_width) || (image_info[i].image_height != image_info[i -1].image_height) || (image_info[i].samples_per_pixel != image_info[i -1].samples_per_pixel) || (image_info[i].bits_per_pixel != image_info[i -1].bits_per_pixel) || memcmp(image_info[i].sample_format, image_info[i-1].sample_format, sizeof(short)*image_info[i].samples_per_pixel)|| memcmp(image_info[i].bits_per_sample, image_info[i-1].bits_per_sample, sizeof(short)*image_info[i].samples_per_pixel)) { identical = false; break; } } return identical; } int main(int argc, char **argv) { int devId = 0; char *fname = NULL; int verbose = 0; int decWriteOutN = 0; int frameBeg = INT_MIN; int frameEnd = INT_MAX; int decodeRange = 0; int doEncode = 0; int encRowsPerStrip = 1; unsigned long long encStripAllocSize = 0; int encWriteOut = 0; int och; while(1) { int option_index = 0; static struct option long_options[] = { { "file", required_argument, 0, 'f'}, { "device", required_argument, 0, 'd'}, {"decode-out", optional_argument, 0, 1}, { "frame-beg", required_argument, 0, 'b'}, { "frame-end", required_argument, 0, 'e'}, { "memtype", required_argument, 0, 'm'}, { "copyh2d", required_argument, 0, 'c'}, { "verbose", no_argument, 0, 'v'}, { "encode", no_argument, 0, 'E'}, {"rowsxstrip", required_argument, 0, 'r'}, {"stripalloc", required_argument, 0, 's'}, {"encode-out", optional_argument, 0, 2}, { "help", no_argument, 0, 'h'}, { 0, 0, 0, 0} }; och = getopt_long(argc, argv, "f:d:vo::hb:e:m:cEr:s:", long_options, &option_index); if (och == -1) break; switch (och) { case 0:// handles long opts with non-NULL flag field break; case 'd': devId = atoi(optarg); break; case 'f': fname = strdup(optarg); break; case 'b': frameBeg = atoi(optarg); decodeRange = 1; break; case 'e': frameEnd = atoi(optarg); decodeRange = 1; break; case 'v': verbose++; break; case 1: decWriteOutN = 1; if(!optarg && argv[optind] != NULL && argv[optind][0] != '-') { decWriteOutN = atoi(argv[optind++]); } break; case 'E': doEncode = 1; break; case 'r': encRowsPerStrip = atoi(optarg); break; case 's': encStripAllocSize = atoi(optarg); break; case 2: encWriteOut = 1; break; case 'h': case '?': usage(argv[0]); default: fprintf(stderr, "unknown option: %c\n", och); usage(argv[0]); } } if (!fname) { fprintf(stderr, "Please specify a TIFF file with the -f option!\n"); usage(argv[0]); } if (frameBeg > frameEnd) { fprintf(stderr, "Invalid frame range!\n"); usage(argv[0]); } CHECK_CUDA(hipSetDevice(devId)); hipDeviceProp_t props; printf("\nUsing GPU:\n"); CHECK_CUDA(hipGetDeviceProperties(&props, devId)); printf("\t%2d (%s, %d SMs, %d th/SM max, CC %d.%d, ECC %s)\n", devId, props.name, props.multiProcessorCount, props.maxThreadsPerMultiProcessor, props.major, props.minor, props.ECCEnabled?"on":"off"); printf("\n"); // dummy allocation to initialize subsystems unsigned char *dummy; CHECK_CUDA(hipMalloc(&dummy, 1024*1024*10)); CHECK_CUDA(hipFree(dummy)); hipStream_t stream; CHECK_CUDA(hipStreamCreate(&stream)); nvtiffStream_t 
tiff_stream; nvtiffDecoder_t decoder; CHECK_NVTIFF(nvtiffStreamCreate(&tiff_stream)); CHECK_NVTIFF(nvtiffDecoderCreate(&decoder, nullptr, nullptr, 0)); CHECK_NVTIFF(nvtiffStreamParseFromFile(fname, tiff_stream)); uint32_t num_images = 0; CHECK_NVTIFF(nvtiffStreamGetNumImages(tiff_stream, &num_images)); std::vector<nvtiffImageInfo_t> image_info(num_images); std::vector<uint8_t*> nvtiff_out(num_images); std::vector<size_t> nvtiff_out_size(num_images); // BEGIN work (possibly) overlapped with H2D copy of the file data if (verbose) { CHECK_NVTIFF(nvtiffStreamPrint(tiff_stream)); } frameBeg = fmax(frameBeg, 0); frameEnd = fmin(frameEnd, num_images-1); const int nDecode = frameEnd-frameBeg+1; for (uint32_t image_id = 0; image_id < num_images; image_id++) { CHECK_NVTIFF(nvtiffStreamGetImageInfo(tiff_stream, image_id, &image_info[image_id])); nvtiff_out_size[image_id] = DIV_UP((size_t)image_info[image_id].bits_per_pixel * image_info[image_id].image_width, 8) * (size_t)image_info[image_id].image_height; if (image_info[image_id].photometric_int == NVTIFF_PHOTOMETRIC_PALETTE) { nvtiff_out_size[image_id] = image_info[image_id].image_width * image_info[image_id].image_height * 3 * sizeof(uint16_t); } CHECK_CUDA(hipMalloc(&nvtiff_out[image_id], nvtiff_out_size[image_id])); } printf("Decoding %u, images [%d, %d], from file %s... ", nDecode, frameBeg, frameEnd, fname); fflush(stdout); auto decode_start = perfclock::now(); if (!decodeRange) { CHECK_NVTIFF(nvtiffDecode(tiff_stream, decoder, nvtiff_out.data(), stream)); } else { CHECK_NVTIFF(nvtiffDecodeRange(tiff_stream, decoder, frameBeg, nDecode, nvtiff_out.data(), stream)); } CHECK_CUDA(hipStreamSynchronize(stream)); auto decode_end = perfclock::now(); double decode_time = std::chrono::duration<float>(decode_end - decode_start).count(); printf("done in %lf secs\n\n", decode_time); if (decWriteOutN) { const uint32_t nout = ::min(decWriteOutN, nDecode); printf("Writing images for the first %d subfile(s)...\n", nout); fflush(stdout); for (uint32_t image_id = 0; image_id < nout; image_id++) { auto& info = image_info[image_id]; std::vector<uint8_t> imageOut_h(nvtiff_out_size[image_id]); CHECK_CUDA(hipMemcpy(imageOut_h.data(), nvtiff_out[image_id], nvtiff_out_size[image_id], hipMemcpyDeviceToHost)); write_image(fname, info, imageOut_h.data(), image_id); } } #ifdef LIBTIFF_TEST TIFF* tif = TIFFOpen(fname, "r"); if (tif) { // we alredy know that all subfiles have the same porperties uint32_t *raster; raster = (uint32_t *)_TIFFmalloc(tiffData->subFiles[0].ncol*tiffData->subFiles[0].nrow * sizeof (uint32_t)); printf("\tDecoding with libTIFF... 
"); fflush(stdout); auto decode_start = perfclock::now(); for(int i = 0; i < tiffData->nSubFiles; i++) { if (!TIFFReadRGBAImage(tif, tiffData->subFiles[i].ncol, tiffData->subFiles[i].nrow, raster, 0)) { fprintf(stderr, "Error while decoding image %d with libTiff\n", i); break; } TIFFReadDirectory(tif); } auto decode_end = perfclock::now(); double decode_time = std::chrono::duration<float>(decode_end - decode_start).count(); printf("done in %lf secs\n\n", decode_time); _TIFFfree(raster); TIFFClose(tif); } #endif bool identical_multi_tiff = check_identical(image_info.data(), num_images); if(!identical_multi_tiff && doEncode){ printf("Encoding will be skipped since the images within the tiff file do not have identical properties...\n"); } // TODO check identical if (doEncode && identical_multi_tiff) { unsigned int nrow = image_info[0].image_height; unsigned int ncol = image_info[0].image_width; unsigned int photometricInt = (unsigned int)image_info[0].photometric_int; unsigned int planarConf = (unsigned int)image_info[0].planar_config; unsigned short pixelSize = image_info[0].bits_per_pixel/8; unsigned short samplesPerPixel = image_info[0].samples_per_pixel; unsigned short sampleFormat = image_info[0].sample_format[0]; unsigned short *bitsPerSample = (unsigned short *)malloc(sizeof(*bitsPerSample)*samplesPerPixel); memcpy(bitsPerSample, image_info[0].bits_per_sample, sizeof(*bitsPerSample)*samplesPerPixel); CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream)); CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream)); tiff_stream = NULL; decoder = NULL; unsigned int nSubFiles = nDecode; unsigned int nStripOut = DIV_UP(nrow, encRowsPerStrip); unsigned int totStrips = nSubFiles*nStripOut; unsigned long long *stripSize_d = NULL; unsigned long long *stripOffs_d = NULL; unsigned char *stripData_d = NULL; if (encStripAllocSize <= 0) { encStripAllocSize = encRowsPerStrip*ncol*(pixelSize); } CHECK_CUDA(hipMalloc(&stripSize_d, sizeof(*stripSize_d)*totStrips)); CHECK_CUDA(hipMalloc(&stripOffs_d, sizeof(*stripOffs_d)*totStrips)); CHECK_CUDA(hipMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize)); nvTiffEncodeCtx_t *ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut); printf("Encoding %u, %s %ux%u images using %d rows per strip and %llu bytes per strip... ", nDecode, photometricInt == 2 ? 
"RGB" : "Grayscale", ncol, nrow, encRowsPerStrip, encStripAllocSize); fflush(stdout); int rv; auto enc_start = perfclock::now(); do { rv = nvTiffEncode(ctx, nrow, ncol, pixelSize, encRowsPerStrip, nSubFiles, nvtiff_out.data(), encStripAllocSize, stripSize_d, stripOffs_d, stripData_d, stream); if (rv != NVTIFF_ENCODE_SUCCESS) { printf("error, while encoding images!\n"); exit(EXIT_FAILURE); } rv = nvTiffEncodeFinalize(ctx, stream); if (rv != NVTIFF_ENCODE_SUCCESS) { if (rv == NVTIFF_ENCODE_COMP_OVERFLOW) { printf("overflow, using %llu bytes per strip...", ctx->stripSizeMax); // * free ctx mem // * reallocate a larger stripData_d buffer // * init a new ctx and retry // * retry compression encStripAllocSize = ctx->stripSizeMax; nvTiffEncodeCtxDestroy(ctx); CHECK_CUDA(hipFree(stripData_d)); CHECK_CUDA(hipMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize)); ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut); } else { printf("error, while finalizing compressed images!\n"); exit(EXIT_FAILURE); } } } while(rv == NVTIFF_ENCODE_COMP_OVERFLOW); CHECK_CUDA(hipStreamSynchronize(stream)); auto enc_end = perfclock::now(); double enc_time = std::chrono::duration<float>(enc_end - enc_start).count(); printf("done in %lf secs (compr. ratio: %.2lfx)\n\n", enc_time, double(nvtiff_out_size[0])*nSubFiles/ctx->stripSizeTot); //printf("Total size of compressed strips: %llu bytes\n", ctx->stripSizeTot); if (encWriteOut) { unsigned long long *stripSize_h = (unsigned long long *)malloc(sizeof(*stripSize_h)*totStrips); CHECK_CUDA(hipMemcpy(stripSize_h, stripSize_d, sizeof(*stripSize_h)*totStrips, hipMemcpyDeviceToHost)); unsigned long long *stripOffs_h = (unsigned long long *)malloc(sizeof(*stripOffs_h)*totStrips); CHECK_CUDA(hipMemcpy(stripOffs_h, stripOffs_d, sizeof(*stripOffs_h)*totStrips, hipMemcpyDeviceToHost)); unsigned char *stripData_h = (unsigned char *)malloc(sizeof(*stripData_h)*ctx->stripSizeTot); CHECK_CUDA(hipMemcpy(stripData_h, stripData_d, ctx->stripSizeTot, hipMemcpyDeviceToHost)); #if 0 FILE *fp = Fopen("stripData.txt", "w"); size_t stripSize = sizeof(*stripData_h)*encRowsPerStrip*ncol*pixelSize; for(unsigned int i = 0; i < nSubFiles; i++) { fprintf(fp, "compressed image %d:\n", i); for(unsigned int j = 0; j < nStripOut; j++) { unsigned long long off = stripOffs_h[i*nStripOut + j]; unsigned long long len = stripSize_h[i*nStripOut + j]; fprintf(fp, "\tstrip %5u, size: %6llu bytes (ratio: %5.2lfx), " "fingerprint: %02X %02X %02X %02X ... %02X %02X %02X %02X\n", j, len, double(stripSize)/len, stripData_h[off + 0], stripData_h[off + 1], stripData_h[off + 2], stripData_h[off + 3], stripData_h[off + len-4], stripData_h[off + len-3], stripData_h[off + len-2], stripData_h[off + len-1]); } fprintf(fp, "\n"); } fclose(fp); #endif printf("\tWriting %u compressed images to TIFF file... 
", nDecode); fflush(stdout); auto write_start = perfclock::now(); nvTiffWriteFile("outFile.tif", VER_REG_TIFF, nSubFiles, nrow, ncol, encRowsPerStrip, samplesPerPixel, bitsPerSample, photometricInt, planarConf, stripSize_h, stripOffs_h, stripData_h, sampleFormat); auto write_end = perfclock::now(); double write_time = std::chrono::duration<float>(write_end - write_start).count(); printf("done in %lf secs\n\n", write_time); free(stripSize_h); free(stripOffs_h); free(stripData_h); } #ifdef LIBTIFF_TEST tif = TIFFOpen("libTiffOut.tif", "w"); if (tif) { unsigned char **imageOut_h = (unsigned char **)Malloc(sizeof(*imageOut_h)*nDecode); for(unsigned int i = 0; i < nDecode; i++) { imageOut_h[i] = (unsigned char *)Malloc(sizeof(*imageOut_h)*imageSize); CHECK_CUDA(hipMemcpy(imageOut_h[i], imageOut_d[i], imageSize, hipMemcpyDeviceToHost)); } size_t stripSize = sizeof(**imageOut_h)*encRowsPerStrip*ncol*pixelSize; printf("\tEncoding with libTIFF... "); fflush(stdout); __t = Wtime(); for(unsigned int i = 0; i < nDecode; i++) { TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, nrow); TIFFSetField(tif, TIFFTAG_IMAGELENGTH, ncol); TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_LZW); TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, photometricInt); TIFFSetField(tif, TIFFTAG_FILLORDER, 1); TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel); TIFFSetField(tif, TIFFTAG_PLANARCONFIG, planarConf); TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, encRowsPerStrip); for(unsigned int j = 0; j < nStripOut; j++) { unsigned int currStripSize = stripSize; if (j == nStripOut-1) { currStripSize = imageSize - j*stripSize; } if (-1 == TIFFWriteEncodedStrip(tif, j, imageOut_h[i]+j*stripSize, currStripSize)) { fprintf(stderr, "Error while encoding image %d with libTiff\n", i); break; } } // need to find a way to have libTiff to encode in // memory without writing to disk the last direnctory // after each TIFFWriteDirectory() call TIFFWriteDirectory(tif); //TIFFRewriteDirectory(tif); } __t = Wtime()-__t; printf("done in %lf secs\n\n", __t); TIFFClose(tif); } #endif CHECK_CUDA(hipFree(stripSize_d)); CHECK_CUDA(hipFree(stripOffs_d)); CHECK_CUDA(hipFree(stripData_d)); free(bitsPerSample); nvTiffEncodeCtxDestroy(ctx); } // cleanup for(unsigned int i = 0; i < nDecode; i++) { CHECK_CUDA(hipFree(nvtiff_out[i])); } free(fname); if(tiff_stream) { CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream)); } if(decoder){ CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream)); } CHECK_CUDA(hipStreamDestroy(stream)); CHECK_CUDA(hipDeviceReset()); return 0; }
61327172148b37276eb61a323abcce048991b35d.cu
/* * Copyright (c) 2022 -2023, NVIDIA CORPORATION. All rights reserved. * * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <string> #include <vector> #include <stdio.h> #include <stdlib.h> #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) || defined(_MSC_VER) #define WINDOWS_LEAN_AND_MEAN #define NOMINMAX #include <windows.h> #include "getopt.h" #pragma warning(disable : 4819) const std::string separator = "\\"; #else #include <getopt.h> const std::string separator = "/"; #endif #include <fstream> #include <iostream> #include <chrono> #include <cuda_runtime.h> #include <nvtiff.h> using perfclock = std::chrono::high_resolution_clock; #define DIV_UP(a, b) (((a) + ((b)-1)) / (b)) #define CHECK_CUDA(call) \ { \ cudaError_t err = call; \ if (cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", __FILE__, __LINE__, cudaGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } #define CHECK_NVTIFF(call) \ { \ nvtiffStatus_t _e = (call); \ if (_e != NVTIFF_STATUS_SUCCESS) { \ fprintf(stderr, "nvtiff error code %d in file '%s' in line %i\n", _e, __FILE__, __LINE__); \ exit(EXIT_FAILURE); \ } \ } //#define LIBTIFF_TEST #ifdef LIBTIFF_TEST #include <tiffio.h> #endif #define MAX_STR_LEN (256) void write_pnm(const char* filename, unsigned char* chan, uint32_t ld, uint32_t width, uint32_t height, uint32_t BPP, uint32_t numcomp, uint32_t write_out_numcomp) { std::ofstream rOutputStream(filename); if (!rOutputStream) { std::cerr << "Cannot open output file: " << filename << std::endl; return; } if (numcomp == 1) { rOutputStream << "P5\n"; } else { rOutputStream << "P6\n"; } rOutputStream << "#nvTIFF\n"; rOutputStream << width << " " << height << "\n"; rOutputStream << (1 << BPP) - 1 << "\n"; if (BPP == 8) { for (uint32_t y = 0; y < height; y++) { for (uint32_t x = 0; x < width; x++) { for (uint32_t c = 0; c < write_out_numcomp; c++) { rOutputStream << chan[(y * ld + x) * numcomp + c]; } } } } else { uint16_t* chan16 = reinterpret_cast<uint16_t*>(chan); for (uint32_t y = 0; y < height; y++) { for (uint32_t x = 0; x < width; x++) { for (uint32_t c = 0; c < write_out_numcomp; c++) { uint32_t pix_val = chan16[(y * ld + x) * numcomp + c]; rOutputStream << static_cast<unsigned char>((pix_val) >> 8) << static_cast<unsigned char>((pix_val)&0xff); } } } } return; } int write_image(std::string input_filename, nvtiffImageInfo& image_info, unsigned char* chan, uint32_t image_id) { // Get the file name, without extension. 
// This will be used to rename the output file. size_t position = input_filename.rfind(separator); std::string out_filename = (std::string::npos == position) ? input_filename : input_filename.substr(position + 1, input_filename.size()); position = out_filename.rfind("."); out_filename = (std::string::npos == position) ? out_filename : out_filename.substr(0, position); out_filename += "_nvtiff_out_" + std::to_string(image_id); uint32_t num_samples = image_info.samples_per_pixel; uint32_t samples_written_to_file = image_info.samples_per_pixel; if (image_info.samples_per_pixel == 3 || image_info.samples_per_pixel == 4 || image_info.photometric_int == NVTIFF_PHOTOMETRIC_PALETTE) { out_filename += ".ppm"; samples_written_to_file = 3; } else if (image_info.samples_per_pixel == 1) { out_filename += ".pgm"; } else { printf("Unable to write image with %d samples per pixel, continuing to the next image..\n", image_info.samples_per_pixel); return EXIT_SUCCESS; } uint32_t bits_per_sample = image_info.bits_per_sample[0]; if (image_info.photometric_int == NVTIFF_PHOTOMETRIC_PALETTE) { num_samples = 3; samples_written_to_file = 3; bits_per_sample = 16; } if (bits_per_sample == 16 || bits_per_sample == 8) { write_pnm(out_filename.c_str(), chan, image_info.image_width, image_info.image_width, image_info.image_height, bits_per_sample, num_samples, samples_written_to_file); } else { printf("Unable to write to file for this set of tiff image, continuing to next image\n"); } return EXIT_SUCCESS; } static void usage(const char *pname) { fprintf(stdout, "Usage:\n" "%s [options] -f|--file <TIFF_FILE>\n" "\n" "General options:\n" "\n" "\t-d DEVICE_ID\n" "\t--device DEVICE_ID\n" "\t\tSpecifies the GPU to use for images decoding/encoding.\n" "\t\tDefault: device 0 is used.\n" "\n" "\t-v\n" "\t--verbose\n" "\t\tPrints some information about the decoded TIFF file.\n" "\n" "\t-h\n" "\t--help\n" "\t\tPrints this help\n" "\n" "Decoding options:\n" "\n" "\t-f TIFF_FILE\n" "\t--file TIFF_FILE\n" "\t\tSpecifies the TIFF file to decode. The code supports both single and multi-image\n" "\t\ttiff files with the following limitations: \n" "\t\t * color space must be either Grayscale (PhotometricInterp.=1) or RGB (=2) \n" "\t\t * image data compressed with LZW (Compression=5) or uncompressed \n" "\t\t * pixel components stored in \"chunky\" format (RGB..., PlanarConfiguration=1)\n" "\t\t for RGB images \n" "\t\t * image data must be organized in Strips, not Tiles \n" "\t\t * pixels of RGB images must be represented with at most 4 components \n" "\t\t * each component must be represented exactly with: \n" "\t\t * 8 bits for LZW compressed images \n" "\t\t * 8, 16 or 32 bits for uncompressed images \n" "\t\t * all images in the file must have the same properties \n" "\n" "\t-b BEG_FRM\n" "\t--frame-beg BEG_FRM\n" "\t\tSpecifies the image id in the input TIFF file to start decoding from. The image\n" "\t\tid must be a value between 0 and the total number of images in the file minus 1.\n" "\t\tValues less than 0 are clamped to 0.\n" "\t\tDefault: 0\n" "\n" "\t-e END_FRM\n" "\t--frame-end END_FRM\n" "\t\tSpecifies the image id in the input TIFF file to stop decoding at (included).\n" "\t\tThe image id must be a value between 0 and the total number of images in the\n" "\t\tfile minus 1. Values greater than num_images-1 are clamped to num_images-1.\n" "\t\tDefault: num_images-1.\n" "\n" "\t-m\n" "\t--memtype TYPE\n" "\t\tSpecifies the type of memory used to hold the TIFF file content: pinned or\n" "\t\tpageable. 
Pinned memory is used if 'p' is specified. Pageable memory is used if\n" "\t\t'r' is specified. In case of pinned memory, file content is not copied to\n" "\t\tdevice memory before the decoding process (with a resulting performance impact)\n" "\t\tunless the option -c is also specified (see below).\n" "\t\tDefualt: r (pageable)\n" "\n" "\t-c\n" "\t--copyh2d\n" "\t\tSpecifies to copy the file data to device memory in case the -m option specifies\n" "\t\tto use pinned memory. In case of pageable memory this option has no effect.\n" "\t\tDefault: off.\n" "\n" "\t--decode-out NUM_OUT\n" "\t\tEnables the writing of selected images from the decoded input TIFF file into\n" "\t\tseparate BMP files for inspection. If no argument is passed, only the first\n" "\t\timage is written to disk, otherwise the first NUM_OUT images are written.\n" "\t\tOutput files are named outImage_0.bmp, outImage_1.bmp...\n" "\t\tDefualt: disabled.\n" "\n" "Encoding options:\n" "\n" "\t-E\n" "\t--encode\n" "\t\tThis option enables the encoding of the raster images obtained by decoding the\n" "\t\tinput TIFF file. The images are divided into strips, compressed with LZW and,\n" "\t\toptionally, written into an output TIFF file.\n" "\t\tDefault: disabled.\n" "\n" "\t-r\n" "\t--rowsxstrip\n" "\t\tSpecifies the number of consecutive rows to use to divide the images into\n" "\t\tstrips. Each image is divided in strips of the same size (except possibly the\n" "\t\tlast strip) and then the strips are compressed as independent byte streams.\n" "\t\tThis option is ignored if -E is not specified.\n" "\t\tDefault: 1.\n" "\n" "\t-s\n" "\t--stripalloc\n" "\t\tSpecifies the initial estimate of the maximum size of compressed strips. If\n" "\t\tduring compression one or more strips require more space, the compression is\n" "\t\taborted and restarted automatically with a safe estimate. 
\n" "\t\tThis option is ignored if -E is not specified.\n" "\t\tDefault: the size, in bytes, of a strip in the uncompressed images.\n" "\n" "\t--encode-out\n" "\t\tEnables the writing of the compressed images to an output TIFF file named\n" "\t\toutFile.tif.\n" "\t\tDefualt: disabled.\n", pname); exit(EXIT_FAILURE); } bool check_identical(nvtiffImageInfo_t * image_info, uint32_t num_images) { bool identical = true; // now check that all subfiles have the same properties for(unsigned int i = 1; i < num_images; i++) { if ((image_info[i].image_width != image_info[i -1].image_width) || (image_info[i].image_height != image_info[i -1].image_height) || (image_info[i].samples_per_pixel != image_info[i -1].samples_per_pixel) || (image_info[i].bits_per_pixel != image_info[i -1].bits_per_pixel) || memcmp(image_info[i].sample_format, image_info[i-1].sample_format, sizeof(short)*image_info[i].samples_per_pixel)|| memcmp(image_info[i].bits_per_sample, image_info[i-1].bits_per_sample, sizeof(short)*image_info[i].samples_per_pixel)) { identical = false; break; } } return identical; } int main(int argc, char **argv) { int devId = 0; char *fname = NULL; int verbose = 0; int decWriteOutN = 0; int frameBeg = INT_MIN; int frameEnd = INT_MAX; int decodeRange = 0; int doEncode = 0; int encRowsPerStrip = 1; unsigned long long encStripAllocSize = 0; int encWriteOut = 0; int och; while(1) { int option_index = 0; static struct option long_options[] = { { "file", required_argument, 0, 'f'}, { "device", required_argument, 0, 'd'}, {"decode-out", optional_argument, 0, 1}, { "frame-beg", required_argument, 0, 'b'}, { "frame-end", required_argument, 0, 'e'}, { "memtype", required_argument, 0, 'm'}, { "copyh2d", required_argument, 0, 'c'}, { "verbose", no_argument, 0, 'v'}, { "encode", no_argument, 0, 'E'}, {"rowsxstrip", required_argument, 0, 'r'}, {"stripalloc", required_argument, 0, 's'}, {"encode-out", optional_argument, 0, 2}, { "help", no_argument, 0, 'h'}, { 0, 0, 0, 0} }; och = getopt_long(argc, argv, "f:d:vo::hb:e:m:cEr:s:", long_options, &option_index); if (och == -1) break; switch (och) { case 0:// handles long opts with non-NULL flag field break; case 'd': devId = atoi(optarg); break; case 'f': fname = strdup(optarg); break; case 'b': frameBeg = atoi(optarg); decodeRange = 1; break; case 'e': frameEnd = atoi(optarg); decodeRange = 1; break; case 'v': verbose++; break; case 1: decWriteOutN = 1; if(!optarg && argv[optind] != NULL && argv[optind][0] != '-') { decWriteOutN = atoi(argv[optind++]); } break; case 'E': doEncode = 1; break; case 'r': encRowsPerStrip = atoi(optarg); break; case 's': encStripAllocSize = atoi(optarg); break; case 2: encWriteOut = 1; break; case 'h': case '?': usage(argv[0]); default: fprintf(stderr, "unknown option: %c\n", och); usage(argv[0]); } } if (!fname) { fprintf(stderr, "Please specify a TIFF file with the -f option!\n"); usage(argv[0]); } if (frameBeg > frameEnd) { fprintf(stderr, "Invalid frame range!\n"); usage(argv[0]); } CHECK_CUDA(cudaSetDevice(devId)); cudaDeviceProp props; printf("\nUsing GPU:\n"); CHECK_CUDA(cudaGetDeviceProperties(&props, devId)); printf("\t%2d (%s, %d SMs, %d th/SM max, CC %d.%d, ECC %s)\n", devId, props.name, props.multiProcessorCount, props.maxThreadsPerMultiProcessor, props.major, props.minor, props.ECCEnabled?"on":"off"); printf("\n"); // dummy allocation to initialize subsystems unsigned char *dummy; CHECK_CUDA(cudaMalloc(&dummy, 1024*1024*10)); CHECK_CUDA(cudaFree(dummy)); cudaStream_t stream; CHECK_CUDA(cudaStreamCreate(&stream)); nvtiffStream_t 
tiff_stream; nvtiffDecoder_t decoder; CHECK_NVTIFF(nvtiffStreamCreate(&tiff_stream)); CHECK_NVTIFF(nvtiffDecoderCreate(&decoder, nullptr, nullptr, 0)); CHECK_NVTIFF(nvtiffStreamParseFromFile(fname, tiff_stream)); uint32_t num_images = 0; CHECK_NVTIFF(nvtiffStreamGetNumImages(tiff_stream, &num_images)); std::vector<nvtiffImageInfo_t> image_info(num_images); std::vector<uint8_t*> nvtiff_out(num_images); std::vector<size_t> nvtiff_out_size(num_images); // BEGIN work (possibly) overlapped with H2D copy of the file data if (verbose) { CHECK_NVTIFF(nvtiffStreamPrint(tiff_stream)); } frameBeg = fmax(frameBeg, 0); frameEnd = fmin(frameEnd, num_images-1); const int nDecode = frameEnd-frameBeg+1; for (uint32_t image_id = 0; image_id < num_images; image_id++) { CHECK_NVTIFF(nvtiffStreamGetImageInfo(tiff_stream, image_id, &image_info[image_id])); nvtiff_out_size[image_id] = DIV_UP((size_t)image_info[image_id].bits_per_pixel * image_info[image_id].image_width, 8) * (size_t)image_info[image_id].image_height; if (image_info[image_id].photometric_int == NVTIFF_PHOTOMETRIC_PALETTE) { nvtiff_out_size[image_id] = image_info[image_id].image_width * image_info[image_id].image_height * 3 * sizeof(uint16_t); } CHECK_CUDA(cudaMalloc(&nvtiff_out[image_id], nvtiff_out_size[image_id])); } printf("Decoding %u, images [%d, %d], from file %s... ", nDecode, frameBeg, frameEnd, fname); fflush(stdout); auto decode_start = perfclock::now(); if (!decodeRange) { CHECK_NVTIFF(nvtiffDecode(tiff_stream, decoder, nvtiff_out.data(), stream)); } else { CHECK_NVTIFF(nvtiffDecodeRange(tiff_stream, decoder, frameBeg, nDecode, nvtiff_out.data(), stream)); } CHECK_CUDA(cudaStreamSynchronize(stream)); auto decode_end = perfclock::now(); double decode_time = std::chrono::duration<float>(decode_end - decode_start).count(); printf("done in %lf secs\n\n", decode_time); if (decWriteOutN) { const uint32_t nout = std::min(decWriteOutN, nDecode); printf("Writing images for the first %d subfile(s)...\n", nout); fflush(stdout); for (uint32_t image_id = 0; image_id < nout; image_id++) { auto& info = image_info[image_id]; std::vector<uint8_t> imageOut_h(nvtiff_out_size[image_id]); CHECK_CUDA(cudaMemcpy(imageOut_h.data(), nvtiff_out[image_id], nvtiff_out_size[image_id], cudaMemcpyDeviceToHost)); write_image(fname, info, imageOut_h.data(), image_id); } } #ifdef LIBTIFF_TEST TIFF* tif = TIFFOpen(fname, "r"); if (tif) { // we alredy know that all subfiles have the same porperties uint32_t *raster; raster = (uint32_t *)_TIFFmalloc(tiffData->subFiles[0].ncol*tiffData->subFiles[0].nrow * sizeof (uint32_t)); printf("\tDecoding with libTIFF... 
"); fflush(stdout); auto decode_start = perfclock::now(); for(int i = 0; i < tiffData->nSubFiles; i++) { if (!TIFFReadRGBAImage(tif, tiffData->subFiles[i].ncol, tiffData->subFiles[i].nrow, raster, 0)) { fprintf(stderr, "Error while decoding image %d with libTiff\n", i); break; } TIFFReadDirectory(tif); } auto decode_end = perfclock::now(); double decode_time = std::chrono::duration<float>(decode_end - decode_start).count(); printf("done in %lf secs\n\n", decode_time); _TIFFfree(raster); TIFFClose(tif); } #endif bool identical_multi_tiff = check_identical(image_info.data(), num_images); if(!identical_multi_tiff && doEncode){ printf("Encoding will be skipped since the images within the tiff file do not have identical properties...\n"); } // TODO check identical if (doEncode && identical_multi_tiff) { unsigned int nrow = image_info[0].image_height; unsigned int ncol = image_info[0].image_width; unsigned int photometricInt = (unsigned int)image_info[0].photometric_int; unsigned int planarConf = (unsigned int)image_info[0].planar_config; unsigned short pixelSize = image_info[0].bits_per_pixel/8; unsigned short samplesPerPixel = image_info[0].samples_per_pixel; unsigned short sampleFormat = image_info[0].sample_format[0]; unsigned short *bitsPerSample = (unsigned short *)malloc(sizeof(*bitsPerSample)*samplesPerPixel); memcpy(bitsPerSample, image_info[0].bits_per_sample, sizeof(*bitsPerSample)*samplesPerPixel); CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream)); CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream)); tiff_stream = NULL; decoder = NULL; unsigned int nSubFiles = nDecode; unsigned int nStripOut = DIV_UP(nrow, encRowsPerStrip); unsigned int totStrips = nSubFiles*nStripOut; unsigned long long *stripSize_d = NULL; unsigned long long *stripOffs_d = NULL; unsigned char *stripData_d = NULL; if (encStripAllocSize <= 0) { encStripAllocSize = encRowsPerStrip*ncol*(pixelSize); } CHECK_CUDA(cudaMalloc(&stripSize_d, sizeof(*stripSize_d)*totStrips)); CHECK_CUDA(cudaMalloc(&stripOffs_d, sizeof(*stripOffs_d)*totStrips)); CHECK_CUDA(cudaMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize)); nvTiffEncodeCtx_t *ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut); printf("Encoding %u, %s %ux%u images using %d rows per strip and %llu bytes per strip... ", nDecode, photometricInt == 2 ? 
"RGB" : "Grayscale", ncol, nrow, encRowsPerStrip, encStripAllocSize); fflush(stdout); int rv; auto enc_start = perfclock::now(); do { rv = nvTiffEncode(ctx, nrow, ncol, pixelSize, encRowsPerStrip, nSubFiles, nvtiff_out.data(), encStripAllocSize, stripSize_d, stripOffs_d, stripData_d, stream); if (rv != NVTIFF_ENCODE_SUCCESS) { printf("error, while encoding images!\n"); exit(EXIT_FAILURE); } rv = nvTiffEncodeFinalize(ctx, stream); if (rv != NVTIFF_ENCODE_SUCCESS) { if (rv == NVTIFF_ENCODE_COMP_OVERFLOW) { printf("overflow, using %llu bytes per strip...", ctx->stripSizeMax); // * free ctx mem // * reallocate a larger stripData_d buffer // * init a new ctx and retry // * retry compression encStripAllocSize = ctx->stripSizeMax; nvTiffEncodeCtxDestroy(ctx); CHECK_CUDA(cudaFree(stripData_d)); CHECK_CUDA(cudaMalloc(&stripData_d, sizeof(*stripData_d)*totStrips*encStripAllocSize)); ctx = nvTiffEncodeCtxCreate(devId, nSubFiles, nStripOut); } else { printf("error, while finalizing compressed images!\n"); exit(EXIT_FAILURE); } } } while(rv == NVTIFF_ENCODE_COMP_OVERFLOW); CHECK_CUDA(cudaStreamSynchronize(stream)); auto enc_end = perfclock::now(); double enc_time = std::chrono::duration<float>(enc_end - enc_start).count(); printf("done in %lf secs (compr. ratio: %.2lfx)\n\n", enc_time, double(nvtiff_out_size[0])*nSubFiles/ctx->stripSizeTot); //printf("Total size of compressed strips: %llu bytes\n", ctx->stripSizeTot); if (encWriteOut) { unsigned long long *stripSize_h = (unsigned long long *)malloc(sizeof(*stripSize_h)*totStrips); CHECK_CUDA(cudaMemcpy(stripSize_h, stripSize_d, sizeof(*stripSize_h)*totStrips, cudaMemcpyDeviceToHost)); unsigned long long *stripOffs_h = (unsigned long long *)malloc(sizeof(*stripOffs_h)*totStrips); CHECK_CUDA(cudaMemcpy(stripOffs_h, stripOffs_d, sizeof(*stripOffs_h)*totStrips, cudaMemcpyDeviceToHost)); unsigned char *stripData_h = (unsigned char *)malloc(sizeof(*stripData_h)*ctx->stripSizeTot); CHECK_CUDA(cudaMemcpy(stripData_h, stripData_d, ctx->stripSizeTot, cudaMemcpyDeviceToHost)); #if 0 FILE *fp = Fopen("stripData.txt", "w"); size_t stripSize = sizeof(*stripData_h)*encRowsPerStrip*ncol*pixelSize; for(unsigned int i = 0; i < nSubFiles; i++) { fprintf(fp, "compressed image %d:\n", i); for(unsigned int j = 0; j < nStripOut; j++) { unsigned long long off = stripOffs_h[i*nStripOut + j]; unsigned long long len = stripSize_h[i*nStripOut + j]; fprintf(fp, "\tstrip %5u, size: %6llu bytes (ratio: %5.2lfx), " "fingerprint: %02X %02X %02X %02X ... %02X %02X %02X %02X\n", j, len, double(stripSize)/len, stripData_h[off + 0], stripData_h[off + 1], stripData_h[off + 2], stripData_h[off + 3], stripData_h[off + len-4], stripData_h[off + len-3], stripData_h[off + len-2], stripData_h[off + len-1]); } fprintf(fp, "\n"); } fclose(fp); #endif printf("\tWriting %u compressed images to TIFF file... 
", nDecode); fflush(stdout); auto write_start = perfclock::now(); nvTiffWriteFile("outFile.tif", VER_REG_TIFF, nSubFiles, nrow, ncol, encRowsPerStrip, samplesPerPixel, bitsPerSample, photometricInt, planarConf, stripSize_h, stripOffs_h, stripData_h, sampleFormat); auto write_end = perfclock::now(); double write_time = std::chrono::duration<float>(write_end - write_start).count(); printf("done in %lf secs\n\n", write_time); free(stripSize_h); free(stripOffs_h); free(stripData_h); } #ifdef LIBTIFF_TEST tif = TIFFOpen("libTiffOut.tif", "w"); if (tif) { unsigned char **imageOut_h = (unsigned char **)Malloc(sizeof(*imageOut_h)*nDecode); for(unsigned int i = 0; i < nDecode; i++) { imageOut_h[i] = (unsigned char *)Malloc(sizeof(*imageOut_h)*imageSize); CHECK_CUDA(cudaMemcpy(imageOut_h[i], imageOut_d[i], imageSize, cudaMemcpyDeviceToHost)); } size_t stripSize = sizeof(**imageOut_h)*encRowsPerStrip*ncol*pixelSize; printf("\tEncoding with libTIFF... "); fflush(stdout); __t = Wtime(); for(unsigned int i = 0; i < nDecode; i++) { TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, nrow); TIFFSetField(tif, TIFFTAG_IMAGELENGTH, ncol); TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_LZW); TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, photometricInt); TIFFSetField(tif, TIFFTAG_FILLORDER, 1); TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel); TIFFSetField(tif, TIFFTAG_PLANARCONFIG, planarConf); TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, encRowsPerStrip); for(unsigned int j = 0; j < nStripOut; j++) { unsigned int currStripSize = stripSize; if (j == nStripOut-1) { currStripSize = imageSize - j*stripSize; } if (-1 == TIFFWriteEncodedStrip(tif, j, imageOut_h[i]+j*stripSize, currStripSize)) { fprintf(stderr, "Error while encoding image %d with libTiff\n", i); break; } } // need to find a way to have libTiff to encode in // memory without writing to disk the last direnctory // after each TIFFWriteDirectory() call TIFFWriteDirectory(tif); //TIFFRewriteDirectory(tif); } __t = Wtime()-__t; printf("done in %lf secs\n\n", __t); TIFFClose(tif); } #endif CHECK_CUDA(cudaFree(stripSize_d)); CHECK_CUDA(cudaFree(stripOffs_d)); CHECK_CUDA(cudaFree(stripData_d)); free(bitsPerSample); nvTiffEncodeCtxDestroy(ctx); } // cleanup for(unsigned int i = 0; i < nDecode; i++) { CHECK_CUDA(cudaFree(nvtiff_out[i])); } free(fname); if(tiff_stream) { CHECK_NVTIFF(nvtiffStreamDestroy(tiff_stream)); } if(decoder){ CHECK_NVTIFF(nvtiffDecoderDestroy(decoder, stream)); } CHECK_CUDA(cudaStreamDestroy(stream)); CHECK_CUDA(cudaDeviceReset()); return 0; }
df79493cae5a882cc6d54e9b26f0f9a74512a513.hip
// !!! This is a file automatically generated by hipify!!! /* * myersGPU-col.cu * * Created on: 25/11/2013 * Author: achacon */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define NUM_BITS 4 #define NUM_BASES 5 #define SIZE_HW_WORD 32 #define MAX_VALUE 0xFFFFFFFF #define HIGH_MASK_32 0x80000000 #define LOW_MASK_32 0x00000001 #define SIZE_WARP 32 #define BASES_PER_ENTRY 8 #define BASES_PER_THREAD 256 #define ENTRIES_PER_THREAD (BASES_PER_THREAD / SIZE_HW_WORD) #define SPACE_PER_QUERY ((((SIZE_QUERY-1)/BASES_PER_THREAD)+1) * BASES_PER_THREAD) #define BASES_PER_WARP (SIZE_WARP * BASES_PER_THREAD) #define QUERIES_PER_WARP (BASES_PER_WARP / SPACE_PER_QUERY) #define THREADS_PER_QUERY (SPACE_PER_QUERY / BASES_PER_THREAD) #define WARP_THREADS_IDLE (SIZE_WARP - (THREADS_PER_QUERY * QUERIES_PER_WARP)) #define WARP_THREADS_ACTIVE (THREADS_PER_QUERY * QUERIES_PER_WARP) #define HANDLE_ERROR(error) (HandleError(error, __FILE__, __LINE__ )) #ifndef MIN #define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif // output temporal carry in internal register #define UADD__CARRY_OUT(c, a, b) \ asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add & output with temporal carry of internal register #define UADD__IN_CARRY_OUT(c, a, b) \ asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add with temporal carry of internal register #define UADD__IN_CARRY(c, a, b) \ asm volatile("addc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); #if (REG == 0) #define REG_PH Ph_A #define REG_MH Mh_A #endif #if (REG == 1) #define REG_PH Ph_B #define REG_MH Mh_B #endif #if (REG == 2) #define REG_PH Ph_C #define REG_MH Mh_C #endif #if (REG == 3) #define REG_PH Ph_D #define REG_MH Mh_D #endif #if (REG == 4) #define REG_PH Ph_E #define REG_MH Mh_E #endif #if (REG == 5) #define REG_PH Ph_F #define REG_MH Mh_F #endif #if (REG == 6) #define REG_PH Ph_G #define REG_MH Mh_G #endif #if (REG == 7) #define REG_PH Ph_H #define REG_MH Mh_H #endif typedef struct { uint32_t bitmap[NUM_BASES]; } qryEntry_t; typedef struct { uint32_t column; uint32_t score; } resEntry_t; typedef struct { uint32_t query; uint32_t position; } candInfo_t; typedef struct { uint32_t size; uint32_t numEntries; uint32_t *h_reference; uint32_t *d_reference; } ref_t; typedef struct { uint32_t numResults; resEntry_t* h_results; resEntry_t* d_results; } res_t; typedef struct { uint32_t totalSizeQueries; uint32_t totalQueriesEntries; uint32_t sizeQueries; uint32_t numQueries; uint32_t numCandidates; float distance; qryEntry_t *h_queries; qryEntry_t *d_queries; candInfo_t *h_candidates; candInfo_t *d_candidates; uint32_t *d_Pv; uint32_t *d_Mv; } qry_t; extern "C" static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString(err), file, line ); exit( EXIT_FAILURE ); } } #if defined(SHUFFLE) || defined(BALLOT) #else inline __device__ void shared_collaborative_shift(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, uint32_t value_E, uint32_t value_F, uint32_t value_G, uint32_t value_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D, uint32_t* res_E, uint32_t* res_F, uint32_t* res_G, uint32_t* res_H) { uint32_t carry; #ifdef FUNNEL interBuff[intraWarpIdx + 1] = value_H; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? 
carry : 0; value_H = __funnelshift_lc(value_G, value_H, 1); value_G = __funnelshift_lc(value_F, value_G, 1); value_F = __funnelshift_lc(value_E, value_F, 1); value_E = __funnelshift_lc(value_D, value_E, 1); value_D = __funnelshift_lc(value_C, value_D, 1); value_C = __funnelshift_lc(value_B, value_C, 1); value_B = __funnelshift_lc(value_A, value_B, 1); value_A = __funnelshift_lc(carry, value_A, 1); #else interBuff[intraWarpIdx + 1] = value_H; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? carry : 0; value_H = (value_G >> 31) | (value_H << 1); value_G = (value_F >> 31) | (value_G << 1); value_F = (value_E >> 31) | (value_F << 1); value_E = (value_D >> 31) | (value_E << 1); value_D = (value_C >> 31) | (value_D << 1); value_C = (value_B >> 31) | (value_C << 1); value_B = (value_A >> 31) | (value_B << 1); value_A = (carry >> 31) | (value_A << 1); #endif (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; (* res_E) = value_E; (* res_F) = value_F; (* res_G) = value_G; (* res_H) = value_H; } inline __device__ void shared_collaborative_sum(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t a_E, const uint32_t a_F, const uint32_t a_G, const uint32_t a_H, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t b_E, const uint32_t b_F, const uint32_t b_G, const uint32_t b_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D, uint32_t* sum_E, uint32_t* sum_F, uint32_t* sum_G, uint32_t* sum_H) { uint32_t carry, c_A, c_B, c_C, c_D, c_E, c_F, c_G, c_H; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY_OUT(c_E, a_E, b_E) UADD__IN_CARRY_OUT(c_F, a_F, b_F) UADD__IN_CARRY_OUT(c_G, a_G, b_G) UADD__IN_CARRY_OUT(c_H, a_H, b_H) UADD__IN_CARRY (carry, 0, 0) /* interBuff[intraWarpIdx + 1] = carry; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) */ while(__any(carry)){ interBuff[intraWarpIdx + 1] = carry; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; (* sum_E) = c_E; (* sum_F) = c_F; (* sum_G) = c_G; (* sum_H) = c_H; } #endif #ifdef SHUFFLE inline __device__ void shuffle_collaborative_shift(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, uint32_t value_E, uint32_t value_F, uint32_t value_G, uint32_t value_H, const uint32_t localThreadIdx, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D, uint32_t* res_E, uint32_t* res_F, uint32_t* res_G, uint32_t* res_H) { uint32_t carry; #ifdef FUNNEL carry = __shfl_up((int) value_H, 1); carry = (localThreadIdx) ? 
carry : 0; value_H = __funnelshift_lc(value_G, value_H, 1); value_G = __funnelshift_lc(value_F, value_G, 1); value_F = __funnelshift_lc(value_E, value_F, 1); value_E = __funnelshift_lc(value_D, value_E, 1); value_D = __funnelshift_lc(value_C, value_D, 1); value_C = __funnelshift_lc(value_B, value_C, 1); value_B = __funnelshift_lc(value_A, value_B, 1); value_A = __funnelshift_lc(carry, value_A, 1); #else carry = __shfl_up((int) value_H, 1); carry = (localThreadIdx) ? carry : 0; value_H = (value_G >> 31) | (value_H << 1); value_G = (value_F >> 31) | (value_G << 1); value_F = (value_E >> 31) | (value_F << 1); value_E = (value_D >> 31) | (value_E << 1); value_D = (value_C >> 31) | (value_D << 1); value_C = (value_B >> 31) | (value_C << 1); value_B = (value_A >> 31) | (value_B << 1); value_A = (carry >> 31) | (value_A << 1); #endif (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; (* res_E) = value_E; (* res_F) = value_F; (* res_G) = value_G; (* res_H) = value_H; } inline __device__ void shuffle_collaborative_sum(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t a_E, const uint32_t a_F, const uint32_t a_G, const uint32_t a_H, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t b_E, const uint32_t b_F, const uint32_t b_G, const uint32_t b_H, const uint32_t localThreadIdx, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D, uint32_t* sum_E, uint32_t* sum_F, uint32_t* sum_G, uint32_t* sum_H) { uint32_t carry, c_A, c_B, c_C, c_D, c_E, c_F, c_G, c_H; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY_OUT(c_E, a_E, b_E) UADD__IN_CARRY_OUT(c_F, a_F, b_F) UADD__IN_CARRY_OUT(c_G, a_G, b_G) UADD__IN_CARRY_OUT(c_H, a_H, b_H) UADD__IN_CARRY (carry, 0, 0) /* carry = __shfl_up((int) (carry), 1); carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) */ while(__any(carry)){ carry = __shfl_up((int) (carry), 1); carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; (* sum_E) = c_E; (* sum_F) = c_F; (* sum_G) = c_G; (* sum_H) = c_H; } #endif #ifdef BALLOT inline __device__ void ballot_collaborative_shift(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, uint32_t value_E, uint32_t value_F, uint32_t value_G, uint32_t value_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D, uint32_t* res_E, uint32_t* res_F, uint32_t* res_G, uint32_t* res_H) { uint32_t carry; carry = ((__ballot(value_H >> 31) << 1) & (1 << intraWarpIdx)) != 0; carry = (localThreadIdx) ? 
carry : 0; value_H = (value_G >> 31) | (value_H << 1); value_G = (value_F >> 31) | (value_G << 1); value_F = (value_E >> 31) | (value_F << 1); value_E = (value_D >> 31) | (value_E << 1); value_D = (value_C >> 31) | (value_D << 1); value_C = (value_B >> 31) | (value_C << 1); value_B = (value_A >> 31) | (value_B << 1); value_A = carry | (value_A << 1); (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; (* res_E) = value_E; (* res_F) = value_F; (* res_G) = value_G; (* res_H) = value_H; } inline __device__ void ballot_collaborative_sum(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t a_E, const uint32_t a_F, const uint32_t a_G, const uint32_t a_H, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t b_E, const uint32_t b_F, const uint32_t b_G, const uint32_t b_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D, uint32_t* sum_E, uint32_t* sum_F, uint32_t* sum_G, uint32_t* sum_H) { uint32_t carry, c_A, c_B, c_C, c_D, c_E, c_F, c_G, c_H; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY_OUT(c_E, a_E, b_E) UADD__IN_CARRY_OUT(c_F, a_F, b_F) UADD__IN_CARRY_OUT(c_G, a_G, b_G) UADD__IN_CARRY_OUT(c_H, a_H, b_H) UADD__IN_CARRY (carry, 0, 0) /* carry = ((__ballot(carry) << 1) & (1 << intraWarpIdx)) != 0; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) */ while(__any(carry)){ carry = ((__ballot(carry) << 1) & (1 << intraWarpIdx)) != 0; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; (* sum_E) = c_E; (* sum_F) = c_F; (* sum_G) = c_G; (* sum_H) = c_H; } #endif inline __device__ uint32_t selectEq(const uint32_t indexBase, const uint32_t Eq0, const uint32_t Eq1, const uint32_t Eq2, const uint32_t Eq3, const uint32_t Eq4) { uint32_t Eq = Eq0; Eq = (indexBase == 1) ? Eq1 : Eq; Eq = (indexBase == 2) ? Eq2 : Eq; Eq = (indexBase == 3) ? Eq3 : Eq; Eq = (indexBase == 4) ? 
Eq4 : Eq; return Eq; } __global__ void myersKernel(const qryEntry_t *d_queries, const uint32_t * __restrict d_reference, const candInfo_t *d_candidates, resEntry_t *d_results, const uint32_t sizeCandidate, const uint32_t sizeQueries, const uint32_t sizeRef, const uint32_t numEntriesPerQuery, const uint32_t numEntriesPerCandidate, const uint32_t numCandidates, const uint32_t numThreads) { const uint32_t * __restrict localCandidate; uint32_t Ph_A, Mh_A, Pv_A, Mv_A, Xv_A, Xh_A, Eq_A, tEq_A; uint32_t Ph_B, Mh_B, Pv_B, Mv_B, Xv_B, Xh_B, Eq_B, tEq_B; uint32_t Ph_C, Mh_C, Pv_C, Mv_C, Xv_C, Xh_C, Eq_C, tEq_C; uint32_t Ph_D, Mh_D, Pv_D, Mv_D, Xv_D, Xh_D, Eq_D, tEq_D; uint32_t Ph_E, Mh_E, Pv_E, Mv_E, Xv_E, Xh_E, Eq_E, tEq_E; uint32_t Ph_F, Mh_F, Pv_F, Mv_F, Xv_F, Xh_F, Eq_F, tEq_F; uint32_t Ph_G, Mh_G, Pv_G, Mv_G, Xv_G, Xh_G, Eq_G, tEq_G; uint32_t Ph_H, Mh_H, Pv_H, Mv_H, Xv_H, Xh_H, Eq_H, tEq_H; uint32_t Eq0_A, Eq1_A, Eq2_A, Eq3_A, Eq4_A; uint32_t Eq0_B, Eq1_B, Eq2_B, Eq3_B, Eq4_B; uint32_t Eq0_C, Eq1_C, Eq2_C, Eq3_C, Eq4_C; uint32_t Eq0_D, Eq1_D, Eq2_D, Eq3_D, Eq4_D; uint32_t Eq0_E, Eq1_E, Eq2_E, Eq3_E, Eq4_E; uint32_t Eq0_F, Eq1_F, Eq2_F, Eq3_F, Eq4_F; uint32_t Eq0_G, Eq1_G, Eq2_G, Eq3_G, Eq4_G; uint32_t Eq0_H, Eq1_H, Eq2_H, Eq3_H, Eq4_H; uint32_t sum_A, sum_B, sum_C, sum_D, sum_E, sum_F, sum_G, sum_H; uint32_t candidate; uint32_t entry, idColumn = 0, indexBase; uint32_t globalThreadIdx = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x; uint32_t intraQueryThreadIdx = (threadIdx.x % SIZE_WARP) % THREADS_PER_QUERY; uint32_t idCandidate = ((globalThreadIdx / SIZE_WARP) * QUERIES_PER_WARP) + ((threadIdx.x % SIZE_WARP) / THREADS_PER_QUERY); #if defined(SHUFFLE) || defined(BALLOT) #else __shared__ uint32_t globalInterBuff[(SIZE_WARP + 1) * (CUDA_NUM_THREADS/SIZE_WARP)]; uint32_t *localInterBuff = globalInterBuff + ((threadIdx.x/SIZE_WARP) * (SIZE_WARP + 1)); #endif #ifndef SHUFFLE uint32_t intraWarpIdx = threadIdx.x % SIZE_WARP; #endif if ((threadIdx.x < MAX_THREADS_PER_SM) && (idCandidate < numCandidates)){ uint32_t positionRef = d_candidates[idCandidate].position; uint32_t entryRef = positionRef / BASES_PER_ENTRY; int32_t score = sizeQueries, minScore = sizeQueries; uint32_t minColumn = 0; uint32_t mask = ((sizeQueries % SIZE_HW_WORD) == 0) ? 
HIGH_MASK_32 : 1 << ((sizeQueries % SIZE_HW_WORD) - 1); uint32_t intraBase, idEntry; if((positionRef < sizeRef) && ((sizeRef - positionRef) > sizeCandidate)){ localCandidate = d_reference + entryRef; Pv_A = MAX_VALUE; Mv_A = 0; Pv_B = MAX_VALUE; Mv_B = 0; Pv_C = MAX_VALUE; Mv_C = 0; Pv_D = MAX_VALUE; Mv_D = 0; Pv_E = MAX_VALUE; Mv_E = 0; Pv_F = MAX_VALUE; Mv_F = 0; Pv_G = MAX_VALUE; Mv_G = 0; Pv_H = MAX_VALUE; Mv_H = 0; entry = (d_candidates[idCandidate].query * numEntriesPerQuery) + (ENTRIES_PER_THREAD * intraQueryThreadIdx); Eq0_A = d_queries[entry].bitmap[0]; Eq1_A = d_queries[entry].bitmap[1]; Eq2_A = d_queries[entry].bitmap[2]; Eq3_A = d_queries[entry].bitmap[3]; Eq4_A = d_queries[entry].bitmap[4]; Eq0_B = d_queries[entry + 1].bitmap[0]; Eq1_B = d_queries[entry + 1].bitmap[1]; Eq2_B = d_queries[entry + 1].bitmap[2]; Eq3_B = d_queries[entry + 1].bitmap[3]; Eq4_B = d_queries[entry + 1].bitmap[4]; Eq0_C = d_queries[entry + 2].bitmap[0]; Eq1_C = d_queries[entry + 2].bitmap[1]; Eq2_C = d_queries[entry + 2].bitmap[2]; Eq3_C = d_queries[entry + 2].bitmap[3]; Eq4_C = d_queries[entry + 2].bitmap[4]; Eq0_D = d_queries[entry + 3].bitmap[0]; Eq1_D = d_queries[entry + 3].bitmap[1]; Eq2_D = d_queries[entry + 3].bitmap[2]; Eq3_D = d_queries[entry + 3].bitmap[3]; Eq4_D = d_queries[entry + 3].bitmap[4]; Eq0_E = d_queries[entry + 4].bitmap[0]; Eq1_E = d_queries[entry + 4].bitmap[1]; Eq2_E = d_queries[entry + 4].bitmap[2]; Eq3_E = d_queries[entry + 4].bitmap[3]; Eq4_E = d_queries[entry + 4].bitmap[4]; Eq0_F = d_queries[entry + 5].bitmap[0]; Eq1_F = d_queries[entry + 5].bitmap[1]; Eq2_F = d_queries[entry + 5].bitmap[2]; Eq3_F = d_queries[entry + 5].bitmap[3]; Eq4_F = d_queries[entry + 5].bitmap[4]; Eq0_G = d_queries[entry + 6].bitmap[0]; Eq1_G = d_queries[entry + 6].bitmap[1]; Eq2_G = d_queries[entry + 6].bitmap[2]; Eq3_G = d_queries[entry + 6].bitmap[3]; Eq4_G = d_queries[entry + 6].bitmap[4]; Eq0_H = d_queries[entry + 7].bitmap[0]; Eq1_H = d_queries[entry + 7].bitmap[1]; Eq2_H = d_queries[entry + 7].bitmap[2]; Eq3_H = d_queries[entry + 7].bitmap[3]; Eq4_H = d_queries[entry + 7].bitmap[4]; for(idEntry = 0; idEntry < numEntriesPerCandidate; idEntry++){ candidate = localCandidate[idEntry]; for(intraBase = 0; intraBase < BASES_PER_ENTRY; intraBase++){ indexBase = candidate & 0x07; Eq_A = selectEq(indexBase, Eq0_A, Eq1_A, Eq2_A, Eq3_A, Eq4_A); Eq_B = selectEq(indexBase, Eq0_B, Eq1_B, Eq2_B, Eq3_B, Eq4_B); Eq_C = selectEq(indexBase, Eq0_C, Eq1_C, Eq2_C, Eq3_C, Eq4_C); Eq_D = selectEq(indexBase, Eq0_D, Eq1_D, Eq2_D, Eq3_D, Eq4_D); Eq_E = selectEq(indexBase, Eq0_E, Eq1_E, Eq2_E, Eq3_E, Eq4_E); Eq_F = selectEq(indexBase, Eq0_F, Eq1_F, Eq2_F, Eq3_F, Eq4_F); Eq_G = selectEq(indexBase, Eq0_G, Eq1_G, Eq2_G, Eq3_G, Eq4_G); Eq_H = selectEq(indexBase, Eq0_H, Eq1_H, Eq2_H, Eq3_H, Eq4_H); Xv_A = Eq_A | Mv_A; Xv_B = Eq_B | Mv_B; Xv_C = Eq_C | Mv_C; Xv_D = Eq_D | Mv_D; Xv_E = Eq_E | Mv_E; Xv_F = Eq_F | Mv_F; Xv_G = Eq_G | Mv_G; Xv_H = Eq_H | Mv_H; tEq_A = Eq_A & Pv_A; tEq_B = Eq_B & Pv_B; tEq_C = Eq_C & Pv_C; tEq_D = Eq_D & Pv_D; tEq_E = Eq_E & Pv_E; tEq_F = Eq_F & Pv_F; tEq_G = Eq_G & Pv_G; tEq_H = Eq_H & Pv_H; #ifdef SHUFFLE shuffle_collaborative_sum(tEq_A, tEq_B, tEq_C, tEq_D, tEq_E, tEq_F, tEq_G, tEq_H, Pv_A, Pv_B, Pv_C, Pv_D, Pv_E, Pv_F, Pv_G, Pv_H, intraQueryThreadIdx, &sum_A, &sum_B, &sum_C, &sum_D, &sum_E, &sum_F, &sum_G, &sum_H); #else #ifdef BALLOT ballot_collaborative_sum(tEq_A, tEq_B, tEq_C, tEq_D, tEq_E, tEq_F, tEq_G, tEq_H, Pv_A, Pv_B, Pv_C, Pv_D, Pv_E, Pv_F, Pv_G, Pv_H, intraQueryThreadIdx, intraWarpIdx, 
&sum_A, &sum_B, &sum_C, &sum_D, &sum_E, &sum_F, &sum_G, &sum_H); #else shared_collaborative_sum(tEq_A, tEq_B, tEq_C, tEq_D, tEq_E, tEq_F, tEq_G, tEq_H, Pv_A, Pv_B, Pv_C, Pv_D, Pv_E, Pv_F, Pv_G, Pv_H, intraQueryThreadIdx, intraWarpIdx, localInterBuff, &sum_A, &sum_B, &sum_C, &sum_D, &sum_E, &sum_F, &sum_G, &sum_H); #endif #endif Xh_A = (sum_A ^ Pv_A) | Eq_A; Xh_B = (sum_B ^ Pv_B) | Eq_B; Xh_C = (sum_C ^ Pv_C) | Eq_C; Xh_D = (sum_D ^ Pv_D) | Eq_D; Xh_E = (sum_E ^ Pv_E) | Eq_E; Xh_F = (sum_F ^ Pv_F) | Eq_F; Xh_G = (sum_G ^ Pv_G) | Eq_G; Xh_H = (sum_H ^ Pv_H) | Eq_H; Ph_A = Mv_A | ~(Xh_A | Pv_A); Ph_B = Mv_B | ~(Xh_B | Pv_B); Ph_C = Mv_C | ~(Xh_C | Pv_C); Ph_D = Mv_D | ~(Xh_D | Pv_D); Ph_E = Mv_E | ~(Xh_E | Pv_E); Ph_F = Mv_F | ~(Xh_F | Pv_F); Ph_G = Mv_G | ~(Xh_G | Pv_G); Ph_H = Mv_H | ~(Xh_H | Pv_H); Mh_A = Pv_A & Xh_A; Mh_B = Pv_B & Xh_B; Mh_C = Pv_C & Xh_C; Mh_D = Pv_D & Xh_D; Mh_E = Pv_E & Xh_E; Mh_F = Pv_F & Xh_F; Mh_G = Pv_G & Xh_G; Mh_H = Pv_H & Xh_H; score += ((REG_PH & mask) != 0) - ((REG_MH & mask) != 0); #ifdef SHUFFLE shuffle_collaborative_shift(Ph_A, Ph_B, Ph_C, Ph_D, Ph_E, Ph_F, Ph_G, Ph_H, intraQueryThreadIdx, &Ph_A, &Ph_B, &Ph_C, &Ph_D, &Ph_E, &Ph_F, &Ph_G, &Ph_H); shuffle_collaborative_shift(Mh_A, Mh_B, Mh_C, Mh_D, Mh_E, Mh_F, Mh_G, Mh_H, intraQueryThreadIdx, &Mh_A, &Mh_B, &Mh_C, &Mh_D, &Mh_E, &Mh_F, &Mh_G, &Mh_H); #else #ifdef BALLOT ballot_collaborative_shift(Ph_A, Ph_B, Ph_C, Ph_D, Ph_E, Ph_F, Ph_G, Ph_H, intraQueryThreadIdx, intraWarpIdx, &Ph_A, &Ph_B, &Ph_C, &Ph_D, &Ph_E, &Ph_F, &Ph_G, &Ph_H); ballot_collaborative_shift(Mh_A, Mh_B, Mh_C, Mh_D, Mh_E, Mh_F, Mh_G, Mh_H, intraQueryThreadIdx, intraWarpIdx, &Mh_A, &Mh_B, &Mh_C, &Mh_D, &Mh_E, &Mh_F, &Mh_G, &Mh_H); #else shared_collaborative_shift(Ph_A, Ph_B, Ph_C, Ph_D, Ph_E, Ph_F, Ph_G, Ph_H, intraQueryThreadIdx, intraWarpIdx, localInterBuff, &Ph_A, &Ph_B, &Ph_C, &Ph_D, &Ph_E, &Ph_F, &Ph_G, &Ph_H); shared_collaborative_shift(Mh_A, Mh_B, Mh_C, Mh_D, Mh_E, Mh_F, Mh_G, Mh_H, intraQueryThreadIdx, intraWarpIdx, localInterBuff, &Mh_A, &Mh_B, &Mh_C, &Mh_D, &Mh_E, &Mh_F, &Mh_G, &Mh_H); #endif #endif Pv_A = Mh_A | ~(Xv_A | Ph_A); Pv_B = Mh_B | ~(Xv_B | Ph_B); Pv_C = Mh_C | ~(Xv_C | Ph_C); Pv_D = Mh_D | ~(Xv_D | Ph_D); Pv_E = Mh_E | ~(Xv_E | Ph_E); Pv_F = Mh_F | ~(Xv_F | Ph_F); Pv_G = Mh_G | ~(Xv_G | Ph_G); Pv_H = Mh_H | ~(Xv_H | Ph_H); Mv_A = Ph_A & Xv_A; Mv_B = Ph_B & Xv_B; Mv_C = Ph_C & Xv_C; Mv_D = Ph_D & Xv_D; Mv_E = Ph_E & Xv_E; Mv_F = Ph_F & Xv_F; Mv_G = Ph_G & Xv_G; Mv_H = Ph_H & Xv_H; candidate >>= 4; minColumn = (score < minScore) ? idColumn : minColumn; minScore = (score < minScore) ? score : minScore; idColumn++; } } if(intraQueryThreadIdx == (THREADS_PER_QUERY - 1)){ d_results[idCandidate].column = minColumn; d_results[idCandidate].score = minScore; } } } } extern "C" void computeAllQueriesGPU(void *reference, void *queries, void *results) { ref_t *ref = (ref_t *) reference; qry_t *qry = (qry_t *) queries; res_t *res = (res_t *) results; uint32_t blocksPerGrid, threadsPerBlock = MAX_THREADS_PER_SM; uint32_t sizeCandidate = qry->sizeQueries * (1 + 2 * qry->distance); uint32_t numEntriesPerQuery = (qry->sizeQueries / SIZE_HW_WORD) + ((qry->sizeQueries % SIZE_HW_WORD) ? 1 : 0); uint32_t numEntriesPerCandidate = (sizeCandidate / BASES_PER_ENTRY) + ((sizeCandidate % BASES_PER_ENTRY) ? 
2 : 1); uint32_t maxCandidates, numCandidates, lastCandidates, processedCandidates; uint32_t numLaunches, kernelIdx, maxThreads; uint32_t activeThreads, idleThreads, numThreads; printf("-- Word size: %d - Query Size: %d - Query Space: %d - Last Register: %d\n-- Threads per Query: %d - Queries per Warp: %d - Threads Idle: %d\n", BASES_PER_THREAD, SIZE_QUERY, SPACE_PER_QUERY, REG, THREADS_PER_QUERY, QUERIES_PER_WARP, WARP_THREADS_IDLE); #ifdef FUNNEL printf("-- OPT: funnelShift [ON] -- "); #else printf("-- OPT: funnelShift [OFF] -- "); #endif #ifdef SHUFFLE printf("shuffle [ON] -- "); #else printf("shuffle [OFF] -- "); #endif #ifdef BALLOT printf("ballot [ON]\n"); #else printf("ballot [OFF]\n"); #endif printf("\n"); /////////LAUNCH GPU KERNELS: //LAUNCH KERNELS FOR KEPLERs GPUs if(DEVICE == 0){ activeThreads = (qry->numCandidates * THREADS_PER_QUERY); idleThreads = ((activeThreads / WARP_THREADS_ACTIVE) * WARP_THREADS_IDLE); numThreads = activeThreads + idleThreads; blocksPerGrid = (numThreads / MAX_THREADS_PER_SM) + ((numThreads % MAX_THREADS_PER_SM) ? 1 : 0); printf("KEPLER: LAUNCH KERNEL 0 -- Bloques: %d - Th_block %d - Th_sm %d\n", blocksPerGrid, threadsPerBlock, MAX_THREADS_PER_SM); hipLaunchKernelGGL(( myersKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, qry->d_queries, ref->d_reference, qry->d_candidates, res->d_results, sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, numEntriesPerCandidate, qry->numCandidates, numThreads); hipDeviceSynchronize(); } //LAUNCH KERNELS FOR FERMIs GPUs if(DEVICE == 1){ maxThreads = threadsPerBlock * 65535; maxCandidates = (maxThreads / SIZE_WARP) * QUERIES_PER_WARP; numLaunches = (qry->numCandidates / maxCandidates) + ((qry->numCandidates / maxCandidates) ? 1 : 0); lastCandidates = qry->numCandidates; processedCandidates = 0; for(kernelIdx=0; kernelIdx<numLaunches; kernelIdx++){ numCandidates = MIN(lastCandidates, maxCandidates); activeThreads = (numCandidates * THREADS_PER_QUERY); idleThreads = ((activeThreads / WARP_THREADS_ACTIVE) * WARP_THREADS_IDLE); numThreads = activeThreads + idleThreads; blocksPerGrid = (numThreads / MAX_THREADS_PER_SM) + ((numThreads % MAX_THREADS_PER_SM) ? 
1 : 0); printf("FERMI: LAUNCH KERNEL %d -- Bloques: %d - Th_block %d - Th_sm %d\n", kernelIdx, blocksPerGrid, threadsPerBlock, MAX_THREADS_PER_SM); hipLaunchKernelGGL(( myersKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, qry->d_queries, ref->d_reference, qry->d_candidates + processedCandidates, res->d_results + processedCandidates, sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, numEntriesPerCandidate, numCandidates, numThreads); lastCandidates -= numCandidates; processedCandidates += numCandidates; } hipDeviceSynchronize(); } } extern "C" int transferCPUtoGPU(void *reference, void *queries, void *results) { ref_t *ref = (ref_t *) reference; qry_t *qry = (qry_t *) queries; res_t *res = (res_t *) results; HANDLE_ERROR(hipSetDevice(DEVICE)); //allocate & transfer Binary Reference to GPU HANDLE_ERROR(hipMalloc((void**) &ref->d_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t))); HANDLE_ERROR(hipMemcpy(ref->d_reference, ref->h_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t), hipMemcpyHostToDevice)); //allocate & transfer Binary Queries to GPU HANDLE_ERROR(hipMalloc((void**) &qry->d_queries, qry->totalQueriesEntries * sizeof(qryEntry_t))); HANDLE_ERROR(hipMemcpy(qry->d_queries, qry->h_queries, qry->totalQueriesEntries * sizeof(qryEntry_t), hipMemcpyHostToDevice)); //allocate & transfer Candidates to GPU HANDLE_ERROR(hipMalloc((void**) &qry->d_candidates, qry->numCandidates * sizeof(candInfo_t))); HANDLE_ERROR(hipMemcpy(qry->d_candidates, qry->h_candidates, qry->numCandidates * sizeof(candInfo_t), hipMemcpyHostToDevice)); //allocate Results HANDLE_ERROR(hipMalloc((void**) &res->d_results, res->numResults * sizeof(resEntry_t))); HANDLE_ERROR(hipMemcpy(res->d_results, res->h_results, res->numResults * sizeof(resEntry_t), hipMemcpyHostToDevice)); return (0); } extern "C" int transferGPUtoCPU(void *results) { res_t *res = (res_t *) results; HANDLE_ERROR(hipMemcpy(res->h_results, res->d_results, res->numResults * sizeof(resEntry_t), hipMemcpyDeviceToHost)); return (0); } extern "C" int freeReferenceGPU(void *reference) { ref_t *ref = (ref_t *) reference; if(ref->d_reference != NULL){ hipFree(ref->d_reference); ref->d_reference=NULL; } return(0); } extern "C" int freeQueriesGPU(void *queries) { qry_t *qry = (qry_t *) queries; if(qry->d_queries != NULL){ hipFree(qry->d_queries); qry->d_queries=NULL; } if(qry->d_candidates != NULL){ hipFree(qry->d_candidates); qry->d_candidates = NULL; } return(0); } extern "C" int freeResultsGPU(void *results) { res_t *res = (res_t *) results; if(res->d_results != NULL){ hipFree(res->d_results); res->d_results=NULL; } return(0); } carry-ou
df79493cae5a882cc6d54e9b26f0f9a74512a513.cu
/* * myersGPU-col.cu * * Created on: 25/11/2013 * Author: achacon */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cuda.h> #define NUM_BITS 4 #define NUM_BASES 5 #define SIZE_HW_WORD 32 #define MAX_VALUE 0xFFFFFFFF #define HIGH_MASK_32 0x80000000 #define LOW_MASK_32 0x00000001 #define SIZE_WARP 32 #define BASES_PER_ENTRY 8 #define BASES_PER_THREAD 256 #define ENTRIES_PER_THREAD (BASES_PER_THREAD / SIZE_HW_WORD) #define SPACE_PER_QUERY ((((SIZE_QUERY-1)/BASES_PER_THREAD)+1) * BASES_PER_THREAD) #define BASES_PER_WARP (SIZE_WARP * BASES_PER_THREAD) #define QUERIES_PER_WARP (BASES_PER_WARP / SPACE_PER_QUERY) #define THREADS_PER_QUERY (SPACE_PER_QUERY / BASES_PER_THREAD) #define WARP_THREADS_IDLE (SIZE_WARP - (THREADS_PER_QUERY * QUERIES_PER_WARP)) #define WARP_THREADS_ACTIVE (THREADS_PER_QUERY * QUERIES_PER_WARP) #define HANDLE_ERROR(error) (HandleError(error, __FILE__, __LINE__ )) #ifndef MIN #define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif // output temporal carry in internal register #define UADD__CARRY_OUT(c, a, b) \ asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add & output with temporal carry of internal register #define UADD__IN_CARRY_OUT(c, a, b) \ asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add with temporal carry of internal register #define UADD__IN_CARRY(c, a, b) \ asm volatile("addc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); #if (REG == 0) #define REG_PH Ph_A #define REG_MH Mh_A #endif #if (REG == 1) #define REG_PH Ph_B #define REG_MH Mh_B #endif #if (REG == 2) #define REG_PH Ph_C #define REG_MH Mh_C #endif #if (REG == 3) #define REG_PH Ph_D #define REG_MH Mh_D #endif #if (REG == 4) #define REG_PH Ph_E #define REG_MH Mh_E #endif #if (REG == 5) #define REG_PH Ph_F #define REG_MH Mh_F #endif #if (REG == 6) #define REG_PH Ph_G #define REG_MH Mh_G #endif #if (REG == 7) #define REG_PH Ph_H #define REG_MH Mh_H #endif typedef struct { uint32_t bitmap[NUM_BASES]; } qryEntry_t; typedef struct { uint32_t column; uint32_t score; } resEntry_t; typedef struct { uint32_t query; uint32_t position; } candInfo_t; typedef struct { uint32_t size; uint32_t numEntries; uint32_t *h_reference; uint32_t *d_reference; } ref_t; typedef struct { uint32_t numResults; resEntry_t* h_results; resEntry_t* d_results; } res_t; typedef struct { uint32_t totalSizeQueries; uint32_t totalQueriesEntries; uint32_t sizeQueries; uint32_t numQueries; uint32_t numCandidates; float distance; qryEntry_t *h_queries; qryEntry_t *d_queries; candInfo_t *h_candidates; candInfo_t *d_candidates; uint32_t *d_Pv; uint32_t *d_Mv; } qry_t; extern "C" static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line ); exit( EXIT_FAILURE ); } } #if defined(SHUFFLE) || defined(BALLOT) #else inline __device__ void shared_collaborative_shift(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, uint32_t value_E, uint32_t value_F, uint32_t value_G, uint32_t value_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D, uint32_t* res_E, uint32_t* res_F, uint32_t* res_G, uint32_t* res_H) { uint32_t carry; #ifdef FUNNEL interBuff[intraWarpIdx + 1] = value_H; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? 
carry : 0; value_H = __funnelshift_lc(value_G, value_H, 1); value_G = __funnelshift_lc(value_F, value_G, 1); value_F = __funnelshift_lc(value_E, value_F, 1); value_E = __funnelshift_lc(value_D, value_E, 1); value_D = __funnelshift_lc(value_C, value_D, 1); value_C = __funnelshift_lc(value_B, value_C, 1); value_B = __funnelshift_lc(value_A, value_B, 1); value_A = __funnelshift_lc(carry, value_A, 1); #else interBuff[intraWarpIdx + 1] = value_H; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? carry : 0; value_H = (value_G >> 31) | (value_H << 1); value_G = (value_F >> 31) | (value_G << 1); value_F = (value_E >> 31) | (value_F << 1); value_E = (value_D >> 31) | (value_E << 1); value_D = (value_C >> 31) | (value_D << 1); value_C = (value_B >> 31) | (value_C << 1); value_B = (value_A >> 31) | (value_B << 1); value_A = (carry >> 31) | (value_A << 1); #endif (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; (* res_E) = value_E; (* res_F) = value_F; (* res_G) = value_G; (* res_H) = value_H; } inline __device__ void shared_collaborative_sum(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t a_E, const uint32_t a_F, const uint32_t a_G, const uint32_t a_H, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t b_E, const uint32_t b_F, const uint32_t b_G, const uint32_t b_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D, uint32_t* sum_E, uint32_t* sum_F, uint32_t* sum_G, uint32_t* sum_H) { uint32_t carry, c_A, c_B, c_C, c_D, c_E, c_F, c_G, c_H; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY_OUT(c_E, a_E, b_E) UADD__IN_CARRY_OUT(c_F, a_F, b_F) UADD__IN_CARRY_OUT(c_G, a_G, b_G) UADD__IN_CARRY_OUT(c_H, a_H, b_H) UADD__IN_CARRY (carry, 0, 0) /* interBuff[intraWarpIdx + 1] = carry; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) */ while(__any(carry)){ interBuff[intraWarpIdx + 1] = carry; carry = interBuff[intraWarpIdx]; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; (* sum_E) = c_E; (* sum_F) = c_F; (* sum_G) = c_G; (* sum_H) = c_H; } #endif #ifdef SHUFFLE inline __device__ void shuffle_collaborative_shift(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, uint32_t value_E, uint32_t value_F, uint32_t value_G, uint32_t value_H, const uint32_t localThreadIdx, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D, uint32_t* res_E, uint32_t* res_F, uint32_t* res_G, uint32_t* res_H) { uint32_t carry; #ifdef FUNNEL carry = __shfl_up((int) value_H, 1); carry = (localThreadIdx) ? 
carry : 0; value_H = __funnelshift_lc(value_G, value_H, 1); value_G = __funnelshift_lc(value_F, value_G, 1); value_F = __funnelshift_lc(value_E, value_F, 1); value_E = __funnelshift_lc(value_D, value_E, 1); value_D = __funnelshift_lc(value_C, value_D, 1); value_C = __funnelshift_lc(value_B, value_C, 1); value_B = __funnelshift_lc(value_A, value_B, 1); value_A = __funnelshift_lc(carry, value_A, 1); #else carry = __shfl_up((int) value_H, 1); carry = (localThreadIdx) ? carry : 0; value_H = (value_G >> 31) | (value_H << 1); value_G = (value_F >> 31) | (value_G << 1); value_F = (value_E >> 31) | (value_F << 1); value_E = (value_D >> 31) | (value_E << 1); value_D = (value_C >> 31) | (value_D << 1); value_C = (value_B >> 31) | (value_C << 1); value_B = (value_A >> 31) | (value_B << 1); value_A = (carry >> 31) | (value_A << 1); #endif (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; (* res_E) = value_E; (* res_F) = value_F; (* res_G) = value_G; (* res_H) = value_H; } inline __device__ void shuffle_collaborative_sum(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t a_E, const uint32_t a_F, const uint32_t a_G, const uint32_t a_H, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t b_E, const uint32_t b_F, const uint32_t b_G, const uint32_t b_H, const uint32_t localThreadIdx, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D, uint32_t* sum_E, uint32_t* sum_F, uint32_t* sum_G, uint32_t* sum_H) { uint32_t carry, c_A, c_B, c_C, c_D, c_E, c_F, c_G, c_H; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY_OUT(c_E, a_E, b_E) UADD__IN_CARRY_OUT(c_F, a_F, b_F) UADD__IN_CARRY_OUT(c_G, a_G, b_G) UADD__IN_CARRY_OUT(c_H, a_H, b_H) UADD__IN_CARRY (carry, 0, 0) /* carry = __shfl_up((int) (carry), 1); carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) */ while(__any(carry)){ carry = __shfl_up((int) (carry), 1); carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; (* sum_E) = c_E; (* sum_F) = c_F; (* sum_G) = c_G; (* sum_H) = c_H; } #endif #ifdef BALLOT inline __device__ void ballot_collaborative_shift(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, uint32_t value_E, uint32_t value_F, uint32_t value_G, uint32_t value_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D, uint32_t* res_E, uint32_t* res_F, uint32_t* res_G, uint32_t* res_H) { uint32_t carry; carry = ((__ballot(value_H >> 31) << 1) & (1 << intraWarpIdx)) != 0; carry = (localThreadIdx) ? 
carry : 0; value_H = (value_G >> 31) | (value_H << 1); value_G = (value_F >> 31) | (value_G << 1); value_F = (value_E >> 31) | (value_F << 1); value_E = (value_D >> 31) | (value_E << 1); value_D = (value_C >> 31) | (value_D << 1); value_C = (value_B >> 31) | (value_C << 1); value_B = (value_A >> 31) | (value_B << 1); value_A = carry | (value_A << 1); (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; (* res_E) = value_E; (* res_F) = value_F; (* res_G) = value_G; (* res_H) = value_H; } inline __device__ void ballot_collaborative_sum(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t a_E, const uint32_t a_F, const uint32_t a_G, const uint32_t a_H, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t b_E, const uint32_t b_F, const uint32_t b_G, const uint32_t b_H, const uint32_t localThreadIdx, const uint32_t intraWarpIdx, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D, uint32_t* sum_E, uint32_t* sum_F, uint32_t* sum_G, uint32_t* sum_H) { uint32_t carry, c_A, c_B, c_C, c_D, c_E, c_F, c_G, c_H; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY_OUT(c_E, a_E, b_E) UADD__IN_CARRY_OUT(c_F, a_F, b_F) UADD__IN_CARRY_OUT(c_G, a_G, b_G) UADD__IN_CARRY_OUT(c_H, a_H, b_H) UADD__IN_CARRY (carry, 0, 0) /* carry = ((__ballot(carry) << 1) & (1 << intraWarpIdx)) != 0; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) */ while(__any(carry)){ carry = ((__ballot(carry) << 1) & (1 << intraWarpIdx)) != 0; carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY_OUT(c_E, c_E, 0) UADD__IN_CARRY_OUT(c_F, c_F, 0) UADD__IN_CARRY_OUT(c_G, c_G, 0) UADD__IN_CARRY_OUT(c_H, c_H, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; (* sum_E) = c_E; (* sum_F) = c_F; (* sum_G) = c_G; (* sum_H) = c_H; } #endif inline __device__ uint32_t selectEq(const uint32_t indexBase, const uint32_t Eq0, const uint32_t Eq1, const uint32_t Eq2, const uint32_t Eq3, const uint32_t Eq4) { uint32_t Eq = Eq0; Eq = (indexBase == 1) ? Eq1 : Eq; Eq = (indexBase == 2) ? Eq2 : Eq; Eq = (indexBase == 3) ? Eq3 : Eq; Eq = (indexBase == 4) ? 
Eq4 : Eq; return Eq; } __global__ void myersKernel(const qryEntry_t *d_queries, const uint32_t * __restrict d_reference, const candInfo_t *d_candidates, resEntry_t *d_results, const uint32_t sizeCandidate, const uint32_t sizeQueries, const uint32_t sizeRef, const uint32_t numEntriesPerQuery, const uint32_t numEntriesPerCandidate, const uint32_t numCandidates, const uint32_t numThreads) { const uint32_t * __restrict localCandidate; uint32_t Ph_A, Mh_A, Pv_A, Mv_A, Xv_A, Xh_A, Eq_A, tEq_A; uint32_t Ph_B, Mh_B, Pv_B, Mv_B, Xv_B, Xh_B, Eq_B, tEq_B; uint32_t Ph_C, Mh_C, Pv_C, Mv_C, Xv_C, Xh_C, Eq_C, tEq_C; uint32_t Ph_D, Mh_D, Pv_D, Mv_D, Xv_D, Xh_D, Eq_D, tEq_D; uint32_t Ph_E, Mh_E, Pv_E, Mv_E, Xv_E, Xh_E, Eq_E, tEq_E; uint32_t Ph_F, Mh_F, Pv_F, Mv_F, Xv_F, Xh_F, Eq_F, tEq_F; uint32_t Ph_G, Mh_G, Pv_G, Mv_G, Xv_G, Xh_G, Eq_G, tEq_G; uint32_t Ph_H, Mh_H, Pv_H, Mv_H, Xv_H, Xh_H, Eq_H, tEq_H; uint32_t Eq0_A, Eq1_A, Eq2_A, Eq3_A, Eq4_A; uint32_t Eq0_B, Eq1_B, Eq2_B, Eq3_B, Eq4_B; uint32_t Eq0_C, Eq1_C, Eq2_C, Eq3_C, Eq4_C; uint32_t Eq0_D, Eq1_D, Eq2_D, Eq3_D, Eq4_D; uint32_t Eq0_E, Eq1_E, Eq2_E, Eq3_E, Eq4_E; uint32_t Eq0_F, Eq1_F, Eq2_F, Eq3_F, Eq4_F; uint32_t Eq0_G, Eq1_G, Eq2_G, Eq3_G, Eq4_G; uint32_t Eq0_H, Eq1_H, Eq2_H, Eq3_H, Eq4_H; uint32_t sum_A, sum_B, sum_C, sum_D, sum_E, sum_F, sum_G, sum_H; uint32_t candidate; uint32_t entry, idColumn = 0, indexBase; uint32_t globalThreadIdx = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x; uint32_t intraQueryThreadIdx = (threadIdx.x % SIZE_WARP) % THREADS_PER_QUERY; uint32_t idCandidate = ((globalThreadIdx / SIZE_WARP) * QUERIES_PER_WARP) + ((threadIdx.x % SIZE_WARP) / THREADS_PER_QUERY); #if defined(SHUFFLE) || defined(BALLOT) #else __shared__ uint32_t globalInterBuff[(SIZE_WARP + 1) * (CUDA_NUM_THREADS/SIZE_WARP)]; uint32_t *localInterBuff = globalInterBuff + ((threadIdx.x/SIZE_WARP) * (SIZE_WARP + 1)); #endif #ifndef SHUFFLE uint32_t intraWarpIdx = threadIdx.x % SIZE_WARP; #endif if ((threadIdx.x < MAX_THREADS_PER_SM) && (idCandidate < numCandidates)){ uint32_t positionRef = d_candidates[idCandidate].position; uint32_t entryRef = positionRef / BASES_PER_ENTRY; int32_t score = sizeQueries, minScore = sizeQueries; uint32_t minColumn = 0; uint32_t mask = ((sizeQueries % SIZE_HW_WORD) == 0) ? 
HIGH_MASK_32 : 1 << ((sizeQueries % SIZE_HW_WORD) - 1); uint32_t intraBase, idEntry; if((positionRef < sizeRef) && ((sizeRef - positionRef) > sizeCandidate)){ localCandidate = d_reference + entryRef; Pv_A = MAX_VALUE; Mv_A = 0; Pv_B = MAX_VALUE; Mv_B = 0; Pv_C = MAX_VALUE; Mv_C = 0; Pv_D = MAX_VALUE; Mv_D = 0; Pv_E = MAX_VALUE; Mv_E = 0; Pv_F = MAX_VALUE; Mv_F = 0; Pv_G = MAX_VALUE; Mv_G = 0; Pv_H = MAX_VALUE; Mv_H = 0; entry = (d_candidates[idCandidate].query * numEntriesPerQuery) + (ENTRIES_PER_THREAD * intraQueryThreadIdx); Eq0_A = d_queries[entry].bitmap[0]; Eq1_A = d_queries[entry].bitmap[1]; Eq2_A = d_queries[entry].bitmap[2]; Eq3_A = d_queries[entry].bitmap[3]; Eq4_A = d_queries[entry].bitmap[4]; Eq0_B = d_queries[entry + 1].bitmap[0]; Eq1_B = d_queries[entry + 1].bitmap[1]; Eq2_B = d_queries[entry + 1].bitmap[2]; Eq3_B = d_queries[entry + 1].bitmap[3]; Eq4_B = d_queries[entry + 1].bitmap[4]; Eq0_C = d_queries[entry + 2].bitmap[0]; Eq1_C = d_queries[entry + 2].bitmap[1]; Eq2_C = d_queries[entry + 2].bitmap[2]; Eq3_C = d_queries[entry + 2].bitmap[3]; Eq4_C = d_queries[entry + 2].bitmap[4]; Eq0_D = d_queries[entry + 3].bitmap[0]; Eq1_D = d_queries[entry + 3].bitmap[1]; Eq2_D = d_queries[entry + 3].bitmap[2]; Eq3_D = d_queries[entry + 3].bitmap[3]; Eq4_D = d_queries[entry + 3].bitmap[4]; Eq0_E = d_queries[entry + 4].bitmap[0]; Eq1_E = d_queries[entry + 4].bitmap[1]; Eq2_E = d_queries[entry + 4].bitmap[2]; Eq3_E = d_queries[entry + 4].bitmap[3]; Eq4_E = d_queries[entry + 4].bitmap[4]; Eq0_F = d_queries[entry + 5].bitmap[0]; Eq1_F = d_queries[entry + 5].bitmap[1]; Eq2_F = d_queries[entry + 5].bitmap[2]; Eq3_F = d_queries[entry + 5].bitmap[3]; Eq4_F = d_queries[entry + 5].bitmap[4]; Eq0_G = d_queries[entry + 6].bitmap[0]; Eq1_G = d_queries[entry + 6].bitmap[1]; Eq2_G = d_queries[entry + 6].bitmap[2]; Eq3_G = d_queries[entry + 6].bitmap[3]; Eq4_G = d_queries[entry + 6].bitmap[4]; Eq0_H = d_queries[entry + 7].bitmap[0]; Eq1_H = d_queries[entry + 7].bitmap[1]; Eq2_H = d_queries[entry + 7].bitmap[2]; Eq3_H = d_queries[entry + 7].bitmap[3]; Eq4_H = d_queries[entry + 7].bitmap[4]; for(idEntry = 0; idEntry < numEntriesPerCandidate; idEntry++){ candidate = localCandidate[idEntry]; for(intraBase = 0; intraBase < BASES_PER_ENTRY; intraBase++){ indexBase = candidate & 0x07; Eq_A = selectEq(indexBase, Eq0_A, Eq1_A, Eq2_A, Eq3_A, Eq4_A); Eq_B = selectEq(indexBase, Eq0_B, Eq1_B, Eq2_B, Eq3_B, Eq4_B); Eq_C = selectEq(indexBase, Eq0_C, Eq1_C, Eq2_C, Eq3_C, Eq4_C); Eq_D = selectEq(indexBase, Eq0_D, Eq1_D, Eq2_D, Eq3_D, Eq4_D); Eq_E = selectEq(indexBase, Eq0_E, Eq1_E, Eq2_E, Eq3_E, Eq4_E); Eq_F = selectEq(indexBase, Eq0_F, Eq1_F, Eq2_F, Eq3_F, Eq4_F); Eq_G = selectEq(indexBase, Eq0_G, Eq1_G, Eq2_G, Eq3_G, Eq4_G); Eq_H = selectEq(indexBase, Eq0_H, Eq1_H, Eq2_H, Eq3_H, Eq4_H); Xv_A = Eq_A | Mv_A; Xv_B = Eq_B | Mv_B; Xv_C = Eq_C | Mv_C; Xv_D = Eq_D | Mv_D; Xv_E = Eq_E | Mv_E; Xv_F = Eq_F | Mv_F; Xv_G = Eq_G | Mv_G; Xv_H = Eq_H | Mv_H; tEq_A = Eq_A & Pv_A; tEq_B = Eq_B & Pv_B; tEq_C = Eq_C & Pv_C; tEq_D = Eq_D & Pv_D; tEq_E = Eq_E & Pv_E; tEq_F = Eq_F & Pv_F; tEq_G = Eq_G & Pv_G; tEq_H = Eq_H & Pv_H; #ifdef SHUFFLE shuffle_collaborative_sum(tEq_A, tEq_B, tEq_C, tEq_D, tEq_E, tEq_F, tEq_G, tEq_H, Pv_A, Pv_B, Pv_C, Pv_D, Pv_E, Pv_F, Pv_G, Pv_H, intraQueryThreadIdx, &sum_A, &sum_B, &sum_C, &sum_D, &sum_E, &sum_F, &sum_G, &sum_H); #else #ifdef BALLOT ballot_collaborative_sum(tEq_A, tEq_B, tEq_C, tEq_D, tEq_E, tEq_F, tEq_G, tEq_H, Pv_A, Pv_B, Pv_C, Pv_D, Pv_E, Pv_F, Pv_G, Pv_H, intraQueryThreadIdx, intraWarpIdx, 
&sum_A, &sum_B, &sum_C, &sum_D, &sum_E, &sum_F, &sum_G, &sum_H); #else shared_collaborative_sum(tEq_A, tEq_B, tEq_C, tEq_D, tEq_E, tEq_F, tEq_G, tEq_H, Pv_A, Pv_B, Pv_C, Pv_D, Pv_E, Pv_F, Pv_G, Pv_H, intraQueryThreadIdx, intraWarpIdx, localInterBuff, &sum_A, &sum_B, &sum_C, &sum_D, &sum_E, &sum_F, &sum_G, &sum_H); #endif #endif Xh_A = (sum_A ^ Pv_A) | Eq_A; Xh_B = (sum_B ^ Pv_B) | Eq_B; Xh_C = (sum_C ^ Pv_C) | Eq_C; Xh_D = (sum_D ^ Pv_D) | Eq_D; Xh_E = (sum_E ^ Pv_E) | Eq_E; Xh_F = (sum_F ^ Pv_F) | Eq_F; Xh_G = (sum_G ^ Pv_G) | Eq_G; Xh_H = (sum_H ^ Pv_H) | Eq_H; Ph_A = Mv_A | ~(Xh_A | Pv_A); Ph_B = Mv_B | ~(Xh_B | Pv_B); Ph_C = Mv_C | ~(Xh_C | Pv_C); Ph_D = Mv_D | ~(Xh_D | Pv_D); Ph_E = Mv_E | ~(Xh_E | Pv_E); Ph_F = Mv_F | ~(Xh_F | Pv_F); Ph_G = Mv_G | ~(Xh_G | Pv_G); Ph_H = Mv_H | ~(Xh_H | Pv_H); Mh_A = Pv_A & Xh_A; Mh_B = Pv_B & Xh_B; Mh_C = Pv_C & Xh_C; Mh_D = Pv_D & Xh_D; Mh_E = Pv_E & Xh_E; Mh_F = Pv_F & Xh_F; Mh_G = Pv_G & Xh_G; Mh_H = Pv_H & Xh_H; score += ((REG_PH & mask) != 0) - ((REG_MH & mask) != 0); #ifdef SHUFFLE shuffle_collaborative_shift(Ph_A, Ph_B, Ph_C, Ph_D, Ph_E, Ph_F, Ph_G, Ph_H, intraQueryThreadIdx, &Ph_A, &Ph_B, &Ph_C, &Ph_D, &Ph_E, &Ph_F, &Ph_G, &Ph_H); shuffle_collaborative_shift(Mh_A, Mh_B, Mh_C, Mh_D, Mh_E, Mh_F, Mh_G, Mh_H, intraQueryThreadIdx, &Mh_A, &Mh_B, &Mh_C, &Mh_D, &Mh_E, &Mh_F, &Mh_G, &Mh_H); #else #ifdef BALLOT ballot_collaborative_shift(Ph_A, Ph_B, Ph_C, Ph_D, Ph_E, Ph_F, Ph_G, Ph_H, intraQueryThreadIdx, intraWarpIdx, &Ph_A, &Ph_B, &Ph_C, &Ph_D, &Ph_E, &Ph_F, &Ph_G, &Ph_H); ballot_collaborative_shift(Mh_A, Mh_B, Mh_C, Mh_D, Mh_E, Mh_F, Mh_G, Mh_H, intraQueryThreadIdx, intraWarpIdx, &Mh_A, &Mh_B, &Mh_C, &Mh_D, &Mh_E, &Mh_F, &Mh_G, &Mh_H); #else shared_collaborative_shift(Ph_A, Ph_B, Ph_C, Ph_D, Ph_E, Ph_F, Ph_G, Ph_H, intraQueryThreadIdx, intraWarpIdx, localInterBuff, &Ph_A, &Ph_B, &Ph_C, &Ph_D, &Ph_E, &Ph_F, &Ph_G, &Ph_H); shared_collaborative_shift(Mh_A, Mh_B, Mh_C, Mh_D, Mh_E, Mh_F, Mh_G, Mh_H, intraQueryThreadIdx, intraWarpIdx, localInterBuff, &Mh_A, &Mh_B, &Mh_C, &Mh_D, &Mh_E, &Mh_F, &Mh_G, &Mh_H); #endif #endif Pv_A = Mh_A | ~(Xv_A | Ph_A); Pv_B = Mh_B | ~(Xv_B | Ph_B); Pv_C = Mh_C | ~(Xv_C | Ph_C); Pv_D = Mh_D | ~(Xv_D | Ph_D); Pv_E = Mh_E | ~(Xv_E | Ph_E); Pv_F = Mh_F | ~(Xv_F | Ph_F); Pv_G = Mh_G | ~(Xv_G | Ph_G); Pv_H = Mh_H | ~(Xv_H | Ph_H); Mv_A = Ph_A & Xv_A; Mv_B = Ph_B & Xv_B; Mv_C = Ph_C & Xv_C; Mv_D = Ph_D & Xv_D; Mv_E = Ph_E & Xv_E; Mv_F = Ph_F & Xv_F; Mv_G = Ph_G & Xv_G; Mv_H = Ph_H & Xv_H; candidate >>= 4; minColumn = (score < minScore) ? idColumn : minColumn; minScore = (score < minScore) ? score : minScore; idColumn++; } } if(intraQueryThreadIdx == (THREADS_PER_QUERY - 1)){ d_results[idCandidate].column = minColumn; d_results[idCandidate].score = minScore; } } } } extern "C" void computeAllQueriesGPU(void *reference, void *queries, void *results) { ref_t *ref = (ref_t *) reference; qry_t *qry = (qry_t *) queries; res_t *res = (res_t *) results; uint32_t blocksPerGrid, threadsPerBlock = MAX_THREADS_PER_SM; uint32_t sizeCandidate = qry->sizeQueries * (1 + 2 * qry->distance); uint32_t numEntriesPerQuery = (qry->sizeQueries / SIZE_HW_WORD) + ((qry->sizeQueries % SIZE_HW_WORD) ? 1 : 0); uint32_t numEntriesPerCandidate = (sizeCandidate / BASES_PER_ENTRY) + ((sizeCandidate % BASES_PER_ENTRY) ? 
2 : 1); uint32_t maxCandidates, numCandidates, lastCandidates, processedCandidates; uint32_t numLaunches, kernelIdx, maxThreads; uint32_t activeThreads, idleThreads, numThreads; printf("-- Word size: %d - Query Size: %d - Query Space: %d - Last Register: %d\n-- Threads per Query: %d - Queries per Warp: %d - Threads Idle: %d\n", BASES_PER_THREAD, SIZE_QUERY, SPACE_PER_QUERY, REG, THREADS_PER_QUERY, QUERIES_PER_WARP, WARP_THREADS_IDLE); #ifdef FUNNEL printf("-- OPT: funnelShift [ON] -- "); #else printf("-- OPT: funnelShift [OFF] -- "); #endif #ifdef SHUFFLE printf("shuffle [ON] -- "); #else printf("shuffle [OFF] -- "); #endif #ifdef BALLOT printf("ballot [ON]\n"); #else printf("ballot [OFF]\n"); #endif printf("\n"); /////////LAUNCH GPU KERNELS: //LAUNCH KERNELS FOR KEPLERs GPUs if(DEVICE == 0){ activeThreads = (qry->numCandidates * THREADS_PER_QUERY); idleThreads = ((activeThreads / WARP_THREADS_ACTIVE) * WARP_THREADS_IDLE); numThreads = activeThreads + idleThreads; blocksPerGrid = (numThreads / MAX_THREADS_PER_SM) + ((numThreads % MAX_THREADS_PER_SM) ? 1 : 0); printf("KEPLER: LAUNCH KERNEL 0 -- Bloques: %d - Th_block %d - Th_sm %d\n", blocksPerGrid, threadsPerBlock, MAX_THREADS_PER_SM); myersKernel<<<blocksPerGrid, threadsPerBlock>>>(qry->d_queries, ref->d_reference, qry->d_candidates, res->d_results, sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, numEntriesPerCandidate, qry->numCandidates, numThreads); cudaThreadSynchronize(); } //LAUNCH KERNELS FOR FERMIs GPUs if(DEVICE == 1){ maxThreads = threadsPerBlock * 65535; maxCandidates = (maxThreads / SIZE_WARP) * QUERIES_PER_WARP; numLaunches = (qry->numCandidates / maxCandidates) + ((qry->numCandidates / maxCandidates) ? 1 : 0); lastCandidates = qry->numCandidates; processedCandidates = 0; for(kernelIdx=0; kernelIdx<numLaunches; kernelIdx++){ numCandidates = MIN(lastCandidates, maxCandidates); activeThreads = (numCandidates * THREADS_PER_QUERY); idleThreads = ((activeThreads / WARP_THREADS_ACTIVE) * WARP_THREADS_IDLE); numThreads = activeThreads + idleThreads; blocksPerGrid = (numThreads / MAX_THREADS_PER_SM) + ((numThreads % MAX_THREADS_PER_SM) ? 
1 : 0); printf("FERMI: LAUNCH KERNEL %d -- Bloques: %d - Th_block %d - Th_sm %d\n", kernelIdx, blocksPerGrid, threadsPerBlock, MAX_THREADS_PER_SM); myersKernel<<<blocksPerGrid, threadsPerBlock>>>(qry->d_queries, ref->d_reference, qry->d_candidates + processedCandidates, res->d_results + processedCandidates, sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, numEntriesPerCandidate, numCandidates, numThreads); lastCandidates -= numCandidates; processedCandidates += numCandidates; } cudaThreadSynchronize(); } } extern "C" int transferCPUtoGPU(void *reference, void *queries, void *results) { ref_t *ref = (ref_t *) reference; qry_t *qry = (qry_t *) queries; res_t *res = (res_t *) results; HANDLE_ERROR(cudaSetDevice(DEVICE)); //allocate & transfer Binary Reference to GPU HANDLE_ERROR(cudaMalloc((void**) &ref->d_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t))); HANDLE_ERROR(cudaMemcpy(ref->d_reference, ref->h_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t), cudaMemcpyHostToDevice)); //allocate & transfer Binary Queries to GPU HANDLE_ERROR(cudaMalloc((void**) &qry->d_queries, qry->totalQueriesEntries * sizeof(qryEntry_t))); HANDLE_ERROR(cudaMemcpy(qry->d_queries, qry->h_queries, qry->totalQueriesEntries * sizeof(qryEntry_t), cudaMemcpyHostToDevice)); //allocate & transfer Candidates to GPU HANDLE_ERROR(cudaMalloc((void**) &qry->d_candidates, qry->numCandidates * sizeof(candInfo_t))); HANDLE_ERROR(cudaMemcpy(qry->d_candidates, qry->h_candidates, qry->numCandidates * sizeof(candInfo_t), cudaMemcpyHostToDevice)); //allocate Results HANDLE_ERROR(cudaMalloc((void**) &res->d_results, res->numResults * sizeof(resEntry_t))); HANDLE_ERROR(cudaMemcpy(res->d_results, res->h_results, res->numResults * sizeof(resEntry_t), cudaMemcpyHostToDevice)); return (0); } extern "C" int transferGPUtoCPU(void *results) { res_t *res = (res_t *) results; HANDLE_ERROR(cudaMemcpy(res->h_results, res->d_results, res->numResults * sizeof(resEntry_t), cudaMemcpyDeviceToHost)); return (0); } extern "C" int freeReferenceGPU(void *reference) { ref_t *ref = (ref_t *) reference; if(ref->d_reference != NULL){ cudaFree(ref->d_reference); ref->d_reference=NULL; } return(0); } extern "C" int freeQueriesGPU(void *queries) { qry_t *qry = (qry_t *) queries; if(qry->d_queries != NULL){ cudaFree(qry->d_queries); qry->d_queries=NULL; } if(qry->d_candidates != NULL){ cudaFree(qry->d_candidates); qry->d_candidates = NULL; } return(0); } extern "C" int freeResultsGPU(void *results) { res_t *res = (res_t *) results; if(res->d_results != NULL){ cudaFree(res->d_results); res->d_results=NULL; } return(0); } carry-ou
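For orientation, both versions of myersKernel above apply, per candidate base and per 32-bit word, the standard Myers bit-vector recurrence, exchanging the carry of the Pv addition and the overflow bit of the Ph/Mh shift between the threads that share one query. The reference sketch below is a single-word (pattern of at most 32 symbols) host implementation of that same recurrence together with the kernels' running minimum-score and column tracking; the names myersSingleWord, peq, bestScore and bestColumn are invented here for illustration, so this is a sketch of the algorithm rather than code taken from either file.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumes 1 <= strlen(pattern) <= 32; shifts a zero into Ph/Mh, as the kernels
// above do for their lowest word, so the match may start anywhere in the text.
static void myersSingleWord(const char *pattern, const char *text,
                            uint32_t *bestScore, uint32_t *bestColumn)
{
    const uint32_t m = (uint32_t)strlen(pattern);
    uint32_t peq[256] = {0};                          // per-symbol Eq bitmaps
    for (uint32_t i = 0; i < m; ++i)
        peq[(unsigned char)pattern[i]] |= 1u << i;

    uint32_t Pv = 0xFFFFFFFFu, Mv = 0;                // vertical +1 / -1 deltas
    uint32_t score = m, mask = 1u << (m - 1);
    *bestScore = m;
    *bestColumn = 0;

    for (uint32_t j = 0; text[j] != '\0'; ++j) {
        uint32_t Eq = peq[(unsigned char)text[j]];
        uint32_t Xv = Eq | Mv;
        uint32_t Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;   // the carry-propagating sum
        uint32_t Ph = Mv | ~(Xh | Pv);
        uint32_t Mh = Pv & Xh;
        score += ((Ph & mask) != 0) - ((Mh & mask) != 0);
        Ph <<= 1;                                     // zero shifted in
        Mh <<= 1;
        Pv = Mh | ~(Xv | Ph);
        Mv = Ph & Xv;
        if (score < *bestScore) { *bestScore = score; *bestColumn = j; }
    }
}

int main()
{
    uint32_t s, c;
    myersSingleWord("GATTACA", "TTGATTTACAGG", &s, &c);
    printf("best score %u at column %u\n", s, c);
    return 0;
}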
fb59628452160ca3d37cf1d0a9511b26d08a0802.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ inline int getTransArrayIndex(unsigned int width, unsigned int height, unsigned int i)
{
    return height * (i % width) + i / width;
}

__global__ void kCopyToTransDestSlow(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcJumpWidth, unsigned int destJumpHeight, unsigned int numElements)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if(idx < numElements)
        destStart[getTransArrayIndex(srcCopyWidth, destJumpHeight, idx)] = srcStart[(idx / srcCopyWidth) * srcJumpWidth + idx % srcCopyWidth];
}
fb59628452160ca3d37cf1d0a9511b26d08a0802.cu
#include "includes.h"

__device__ inline int getTransArrayIndex(unsigned int width, unsigned int height, unsigned int i)
{
    return height * (i % width) + i / width;
}

__global__ void kCopyToTransDestSlow(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcJumpWidth, unsigned int destJumpHeight, unsigned int numElements)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if(idx < numElements)
        destStart[getTransArrayIndex(srcCopyWidth, destJumpHeight, idx)] = srcStart[(idx / srcCopyWidth) * srcJumpWidth + idx % srcCopyWidth];
}
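The pair above contains only the kernel definition, so a host-side driver is sketched here for orientation. It assumes it is appended to the same .cu file as kCopyToTransDestSlow, and every size in it (row count, pitches, launch configuration) is an illustrative assumption rather than anything taken from the original repository; error checking is omitted for brevity.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const unsigned int numRows = 4;          // rows of the copied block
    const unsigned int srcCopyWidth = 3;     // columns actually copied
    const unsigned int srcJumpWidth = 8;     // row pitch of the source matrix
    const unsigned int destJumpHeight = 4;   // column pitch of the transposed destination
    const unsigned int numElements = numRows * srcCopyWidth;

    float h_src[numRows * srcJumpWidth];
    for (unsigned int i = 0; i < numRows * srcJumpWidth; ++i) h_src[i] = (float)i;

    float *d_src, *d_dest;
    cudaMalloc((void**)&d_src, sizeof(h_src));
    cudaMalloc((void**)&d_dest, srcCopyWidth * destJumpHeight * sizeof(float));
    cudaMemcpy(d_src, h_src, sizeof(h_src), cudaMemcpyHostToDevice);

    const unsigned int threads = 128;
    const unsigned int blocks = (numElements + threads - 1) / threads;
    kCopyToTransDestSlow<<<blocks, threads>>>(d_src, d_dest, srcCopyWidth, srcJumpWidth, destJumpHeight, numElements);
    cudaDeviceSynchronize();

    float h_dest[srcCopyWidth * destJumpHeight];
    cudaMemcpy(h_dest, d_dest, sizeof(h_dest), cudaMemcpyDeviceToHost);
    // Element (row r, column c) of the copied block now sits at h_dest[destJumpHeight * c + r].
    printf("h_dest[0] = %g, h_dest[%u] = %g\n", h_dest[0], destJumpHeight, h_dest[destJumpHeight]);

    cudaFree(d_src);
    cudaFree(d_dest);
    return 0;
}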
7a13cef8d27adc2d72094d96cc1ee53ec1b624b1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <vector>
#include <memory>
#include <iostream>
#include "virtual_interplay.h"
#include "cuda_memory"
#include "unified_array.h"

#ifndef CUDA_CALL
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    exit(EXIT_FAILURE);}} while(0)
#endif

using namespace std;

class Base
{
public:
    int b_id;
public:
    Base() = default;
    __host__ __device__ Base(int _b_id):b_id(_b_id){}
    virtual ~Base() = default;
    __device__ virtual void f() = 0;
    __host__ __device__ virtual void g() = 0;
};

// Classes with virtual bases are supported
class Sub1 : virtual public Base
{
public:
    int sub_id;
public:
    Sub1() = default;
    __host__ __device__ Sub1(int _b_id, int _sub_id):Base(_b_id), sub_id(_sub_id){}
    __device__ void f() override
    {
        printf("Sub1::f %d %d!\n", b_id, sub_id);
        b_id++;
        sub_id++;
    }
    __host__ __device__ void g() override
    {
        printf("Sub1::g %d %d!\n", b_id, sub_id);
    }
};

class Sub2 : public Base
{
public:
    char sub_ch;
public:
    Sub2() = delete; // non-default constructible classes are also supported
    __host__ __device__ Sub2(int _b_id, char _sub_ch):Base(_b_id), sub_ch(_sub_ch){}
    Sub2(const Sub2&) = delete; // non-copy constructible classes are also supported
    Sub2(Sub2&& s) = default;
    __device__ void f() override
    {
        printf("Sub2::f %d %c!\n", b_id, sub_ch);
        b_id++;
        sub_ch++;
    }
    __host__ __device__ void g() override
    {
        printf("Sub2::g %d %c!\n", b_id, sub_ch);
    }
};

__global__ void runner(Base* objects[], int len)
{
    for (int i=0; i<len; i++)
    {
        objects[i]->f();
    }
}

int main()
{
    auto s1 = make_unified_unique<Sub1>(1, 2);
    auto s2 = make_unified_unique<Sub2>(3, 'd');
    UnifiedArray<Base*> objs(2);
    objs[0] = s1.get();
    objs[1] = s2.get();

    // Migrate the objects to the device
    ClassMigrator<Sub1> sub1_migrator;
    sub1_migrator.toDeviceWithVirtualBases(s1.get(), 1);
    ClassMigrator<Sub2>::toDevice(s2.get(), 1);

    // Run the demo to make sure everything works.
    hipLaunchKernelGGL(( runner), dim3(1),dim3(1), 0, 0, objs.data(), 2);

    // Migrate the objects back to the host
    sub1_migrator.toHostWithVirtualBases(s1.get(), 1);
    ClassMigrator<Sub2>::toHost(s2.get(), 1);

    // Another demo to make sure that the changes come back to the host
    objs[0]->g();
    objs[1]->g();

    hipDeviceSynchronize();
    return 0;
}
7a13cef8d27adc2d72094d96cc1ee53ec1b624b1.cu
#include <stdio.h>
#include <vector>
#include <memory>
#include <iostream>
#include "virtual_interplay.h"
#include "cuda_memory"
#include "unified_array.h"

#ifndef CUDA_CALL
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    exit(EXIT_FAILURE);}} while(0)
#endif

using namespace std;

class Base
{
public:
    int b_id;
public:
    Base() = default;
    __host__ __device__ Base(int _b_id):b_id(_b_id){}
    virtual ~Base() = default;
    __device__ virtual void f() = 0;
    __host__ __device__ virtual void g() = 0;
};

// Classes with virtual bases are supported
class Sub1 : virtual public Base
{
public:
    int sub_id;
public:
    Sub1() = default;
    __host__ __device__ Sub1(int _b_id, int _sub_id):Base(_b_id), sub_id(_sub_id){}
    __device__ void f() override
    {
        printf("Sub1::f %d %d!\n", b_id, sub_id);
        b_id++;
        sub_id++;
    }
    __host__ __device__ void g() override
    {
        printf("Sub1::g %d %d!\n", b_id, sub_id);
    }
};

class Sub2 : public Base
{
public:
    char sub_ch;
public:
    Sub2() = delete; // non-default constructible classes are also supported
    __host__ __device__ Sub2(int _b_id, char _sub_ch):Base(_b_id), sub_ch(_sub_ch){}
    Sub2(const Sub2&) = delete; // non-copy constructible classes are also supported
    Sub2(Sub2&& s) = default;
    __device__ void f() override
    {
        printf("Sub2::f %d %c!\n", b_id, sub_ch);
        b_id++;
        sub_ch++;
    }
    __host__ __device__ void g() override
    {
        printf("Sub2::g %d %c!\n", b_id, sub_ch);
    }
};

__global__ void runner(Base* objects[], int len)
{
    for (int i=0; i<len; i++)
    {
        objects[i]->f();
    }
}

int main()
{
    auto s1 = make_unified_unique<Sub1>(1, 2);
    auto s2 = make_unified_unique<Sub2>(3, 'd');
    UnifiedArray<Base*> objs(2);
    objs[0] = s1.get();
    objs[1] = s2.get();

    // Migrate the objects to the device
    ClassMigrator<Sub1> sub1_migrator;
    sub1_migrator.toDeviceWithVirtualBases(s1.get(), 1);
    ClassMigrator<Sub2>::toDevice(s2.get(), 1);

    // Run the demo to make sure everything works.
    runner<<<1,1>>>(objs.data(), 2);

    // Migrate the objects back to the host
    sub1_migrator.toHostWithVirtualBases(s1.get(), 1);
    ClassMigrator<Sub2>::toHost(s2.get(), 1);

    // Another demo to make sure that the changes come back to the host
    objs[0]->g();
    objs[1]->g();

    cudaDeviceSynchronize();
    return 0;
}
7816cea36615b88bfb4f11a384d35ac601057c1b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/reduction.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform_reduce.h> #include <type_traits> namespace cudf { namespace detail { namespace { /** * @brief Basic element for the minmax reduce operation. * * Stores the minimum and maximum values that have been encountered so far */ template <typename T> struct minmax_pair { T min_val; T max_val; __host__ __device__ minmax_pair() : min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){}; __host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){}; __host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){}; }; /** * @brief Reduce for the minmax operation and return a device scalar. * * @tparam Op Binary operator functor * @tparam InputIterator Input iterator Type * @param d_in input iterator * @param num_items number of items to reduce * @param binary_op binary operator used to reduce * @param mr Device resource used for result allocation * @param stream CUDA stream to run kernels on. * @return rmm::device_scalar<OutputType> */ template <typename T, typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type> rmm::device_scalar<OutputType> reduce_device(InputIterator d_in, cudf::size_type num_items, Op binary_op, rmm::cuda_stream_view stream) { OutputType identity{}; rmm::device_scalar<OutputType> result{identity, stream}; // Allocate temporary storage size_t storage_bytes = 0; hipcub::DeviceReduce::Reduce( nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); auto temp_storage = rmm::device_buffer{storage_bytes, stream}; // Run reduction hipcub::DeviceReduce::Reduce(temp_storage.data(), storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); return result; } /** * @brief Functor that accepts two minmax_pairs and returns a * minmax_pair whose minimum and maximum values are the min() and max() * respectively of the minimums and maximums of the input pairs. 
*/ template <typename T> struct minmax_binary_op : public thrust::binary_function<minmax_pair<T>, minmax_pair<T>, minmax_pair<T>> { __device__ minmax_pair<T> operator()(minmax_pair<T> const &lhs, minmax_pair<T> const &rhs) const { return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val), thrust::max(lhs.max_val, rhs.max_val)}; } }; /** * @brief Creates a minmax_pair<T> from a T */ template <typename T> struct create_minmax { __device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; } }; /** * @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair * that is <T, T> for minimum and maximum or <cudf::DeviceMin::identity<T>(), * cudf::DeviceMax::identity<T>()> */ template <typename T> struct create_minmax_with_nulls { __device__ minmax_pair<T> operator()(thrust::pair<T, bool> i) { return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{}; } }; /** * @brief Dispatch functor for minmax operation. * * This uses the reduce function to compute the min and max values * simultaneously for a column of data. * * @tparam T The input column's type */ struct minmax_functor { template <typename T> static constexpr bool is_supported() { return !(cudf::is_fixed_point<T>() || std::is_same<T, cudf::list_view>::value || std::is_same<T, cudf::struct_view>::value); } template <typename T> auto reduce(column_view const &col, rmm::cuda_stream_view stream) { auto device_col = column_device_view::create(col, stream); // compute minimum and maximum values if (col.has_nulls()) { auto pair_to_minmax = thrust::make_transform_iterator( make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{}); return reduce_device<T>(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } else { auto col_to_minmax = thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{}); return reduce_device<T>(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } } /** * @brief Functor to copy a minmax_pair result to individual scalar instances. */ template <typename T, typename ResultType = minmax_pair<T>> struct assign_min_max { __device__ void operator()() { *min_data = result->min_val; *max_data = result->max_val; } ResultType *result; T *min_data; T *max_data; }; template <typename T, std::enable_if_t<is_supported<T>() and !std::is_same<T, cudf::string_view>::value and !cudf::is_dictionary<T>()> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // create output scalars using ScalarType = cudf::scalar_type_t<T>; auto minimum = new ScalarType(T{}, true, stream, mr); auto maximum = new ScalarType(T{}, true, stream, mr); // copy dev_result to the output scalars device_single_thread(assign_min_max<T>{dev_result.data(), minimum->data(), maximum->data()}, stream); return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)}; } /** * @brief Specialization for strings column. 
*/ template <typename T, std::enable_if_t<std::is_same<T, cudf::string_view>::value> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // compute minimum and maximum values auto dev_result = reduce<cudf::string_view>(col, stream); // copy the minmax_pair to the host; does not copy the strings using OutputType = minmax_pair<cudf::string_view>; OutputType host_result; CUDA_TRY(hipMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), hipMemcpyDeviceToHost, stream.value())); // strings are copied to create the scalars here return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr), std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)}; } /** * @brief Specialization for dictionary column. */ template <typename T, std::enable_if_t<cudf::is_dictionary<T>()> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // copy the minmax_pair to the host to call get_element using OutputType = minmax_pair<T>; OutputType host_result; CUDA_TRY(hipMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), hipMemcpyDeviceToHost, stream.value())); // get the keys for those indexes auto const keys = dictionary_column_view(col).keys(); return {get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr), get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)}; } template <typename T, std::enable_if_t<!is_supported<T>()> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &, rmm::cuda_stream_view, rmm::mr::device_memory_resource *) { CUDF_FAIL("type not supported for minmax() operation"); } }; } // namespace std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { if (col.null_count() == col.size()) { // this handles empty and all-null columns // return scalars with valid==false return {make_default_constructed_scalar(col.type(), stream, mr), make_default_constructed_scalar(col.type(), stream, mr)}; } return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr); } } // namespace detail /** * @copydoc cudf::minmax */ std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( const column_view &col, rmm::mr::device_memory_resource *mr) { return detail::minmax(col, rmm::cuda_stream_default, mr); } } // namespace cudf
7816cea36615b88bfb4f11a384d35ac601057c1b.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/reduction.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform_reduce.h> #include <type_traits> namespace cudf { namespace detail { namespace { /** * @brief Basic element for the minmax reduce operation. * * Stores the minimum and maximum values that have been encountered so far */ template <typename T> struct minmax_pair { T min_val; T max_val; __host__ __device__ minmax_pair() : min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){}; __host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){}; __host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){}; }; /** * @brief Reduce for the minmax operation and return a device scalar. * * @tparam Op Binary operator functor * @tparam InputIterator Input iterator Type * @param d_in input iterator * @param num_items number of items to reduce * @param binary_op binary operator used to reduce * @param mr Device resource used for result allocation * @param stream CUDA stream to run kernels on. * @return rmm::device_scalar<OutputType> */ template <typename T, typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type> rmm::device_scalar<OutputType> reduce_device(InputIterator d_in, cudf::size_type num_items, Op binary_op, rmm::cuda_stream_view stream) { OutputType identity{}; rmm::device_scalar<OutputType> result{identity, stream}; // Allocate temporary storage size_t storage_bytes = 0; cub::DeviceReduce::Reduce( nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); auto temp_storage = rmm::device_buffer{storage_bytes, stream}; // Run reduction cub::DeviceReduce::Reduce(temp_storage.data(), storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); return result; } /** * @brief Functor that accepts two minmax_pairs and returns a * minmax_pair whose minimum and maximum values are the min() and max() * respectively of the minimums and maximums of the input pairs. 
*/ template <typename T> struct minmax_binary_op : public thrust::binary_function<minmax_pair<T>, minmax_pair<T>, minmax_pair<T>> { __device__ minmax_pair<T> operator()(minmax_pair<T> const &lhs, minmax_pair<T> const &rhs) const { return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val), thrust::max(lhs.max_val, rhs.max_val)}; } }; /** * @brief Creates a minmax_pair<T> from a T */ template <typename T> struct create_minmax { __device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; } }; /** * @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair * that is <T, T> for minimum and maximum or <cudf::DeviceMin::identity<T>(), * cudf::DeviceMax::identity<T>()> */ template <typename T> struct create_minmax_with_nulls { __device__ minmax_pair<T> operator()(thrust::pair<T, bool> i) { return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{}; } }; /** * @brief Dispatch functor for minmax operation. * * This uses the reduce function to compute the min and max values * simultaneously for a column of data. * * @tparam T The input column's type */ struct minmax_functor { template <typename T> static constexpr bool is_supported() { return !(cudf::is_fixed_point<T>() || std::is_same<T, cudf::list_view>::value || std::is_same<T, cudf::struct_view>::value); } template <typename T> auto reduce(column_view const &col, rmm::cuda_stream_view stream) { auto device_col = column_device_view::create(col, stream); // compute minimum and maximum values if (col.has_nulls()) { auto pair_to_minmax = thrust::make_transform_iterator( make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{}); return reduce_device<T>(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } else { auto col_to_minmax = thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{}); return reduce_device<T>(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } } /** * @brief Functor to copy a minmax_pair result to individual scalar instances. */ template <typename T, typename ResultType = minmax_pair<T>> struct assign_min_max { __device__ void operator()() { *min_data = result->min_val; *max_data = result->max_val; } ResultType *result; T *min_data; T *max_data; }; template <typename T, std::enable_if_t<is_supported<T>() and !std::is_same<T, cudf::string_view>::value and !cudf::is_dictionary<T>()> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // create output scalars using ScalarType = cudf::scalar_type_t<T>; auto minimum = new ScalarType(T{}, true, stream, mr); auto maximum = new ScalarType(T{}, true, stream, mr); // copy dev_result to the output scalars device_single_thread(assign_min_max<T>{dev_result.data(), minimum->data(), maximum->data()}, stream); return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)}; } /** * @brief Specialization for strings column. 
*/ template <typename T, std::enable_if_t<std::is_same<T, cudf::string_view>::value> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // compute minimum and maximum values auto dev_result = reduce<cudf::string_view>(col, stream); // copy the minmax_pair to the host; does not copy the strings using OutputType = minmax_pair<cudf::string_view>; OutputType host_result; CUDA_TRY(cudaMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDeviceToHost, stream.value())); // strings are copied to create the scalars here return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr), std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)}; } /** * @brief Specialization for dictionary column. */ template <typename T, std::enable_if_t<cudf::is_dictionary<T>()> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // copy the minmax_pair to the host to call get_element using OutputType = minmax_pair<T>; OutputType host_result; CUDA_TRY(cudaMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDeviceToHost, stream.value())); // get the keys for those indexes auto const keys = dictionary_column_view(col).keys(); return {get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr), get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)}; } template <typename T, std::enable_if_t<!is_supported<T>()> * = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const &, rmm::cuda_stream_view, rmm::mr::device_memory_resource *) { CUDF_FAIL("type not supported for minmax() operation"); } }; } // namespace std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( cudf::column_view const &col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { if (col.null_count() == col.size()) { // this handles empty and all-null columns // return scalars with valid==false return {make_default_constructed_scalar(col.type(), stream, mr), make_default_constructed_scalar(col.type(), stream, mr)}; } return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr); } } // namespace detail /** * @copydoc cudf::minmax */ std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( const column_view &col, rmm::mr::device_memory_resource *mr) { return detail::minmax(col, rmm::cuda_stream_default, mr); } } // namespace cudf
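The reduce_device() helper above obtains both extrema in one pass by feeding a transform iterator of minmax_pair values into cub::DeviceReduce::Reduce. The standalone sketch below shows the same pattern on a plain int array; the MinMax, MinMaxOp and ToPair names, the sample data, the INT_MAX/INT_MIN identity and the omitted error checking are illustrative assumptions for this sketch and are not part of libcudf.

#include <cub/cub.cuh>
#include <climits>
#include <cstdio>

// Plain-int stand-ins for minmax_pair, minmax_binary_op and create_minmax above.
struct MinMax { int lo; int hi; };

struct MinMaxOp {
    __host__ __device__ MinMax operator()(const MinMax &a, const MinMax &b) const {
        return MinMax{a.lo < b.lo ? a.lo : b.lo, a.hi > b.hi ? a.hi : b.hi};
    }
};

struct ToPair {
    __host__ __device__ MinMax operator()(int v) const { return MinMax{v, v}; }
};

int main()
{
    const int n = 8;
    int h_in[n] = {5, -3, 9, 0, 7, -8, 2, 4};
    int *d_in;
    MinMax *d_out;
    cudaMalloc((void**)&d_in, n * sizeof(int));
    cudaMalloc((void**)&d_out, sizeof(MinMax));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

    // Every input element becomes a {v, v} pair before the reduction.
    cub::TransformInputIterator<MinMax, ToPair, int*> pair_it(d_in, ToPair{});

    MinMax init{INT_MAX, INT_MIN};
    void *d_temp = nullptr;
    size_t temp_bytes = 0;
    cub::DeviceReduce::Reduce(d_temp, temp_bytes, pair_it, d_out, n, MinMaxOp{}, init);   // size query
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceReduce::Reduce(d_temp, temp_bytes, pair_it, d_out, n, MinMaxOp{}, init);   // actual pass

    MinMax h_out;
    cudaMemcpy(&h_out, d_out, sizeof(MinMax), cudaMemcpyDeviceToHost);
    printf("min %d max %d\n", h_out.lo, h_out.hi);   // expected: min -8 max 9

    cudaFree(d_temp);
    cudaFree(d_out);
    cudaFree(d_in);
    return 0;
}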
9f1ea74b1eb706ebc12e67d34adfd3672ed7f635.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>

__global__ void print_details_of_warps()
{
    int gid = blockIdx.x * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
    int warp_id = threadIdx.x / 32;
    int grid_idx = blockIdx.y * gridDim.x + blockIdx.x;

    printf("tid : %d, bid.x : %d, bid.y : %d, gid : %d, warp_id : %d, grid_idx : %d\n", threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, grid_idx);
}

int main(int argc, char** argv){
    dim3 block_size(42);
    dim3 grid_size(2,2);

    hipLaunchKernelGGL(( print_details_of_warps) , dim3(grid_size),dim3(block_size), 0, 0, );
    hipDeviceSynchronize();

    hipDeviceReset();
    return EXIT_SUCCESS;
}
9f1ea74b1eb706ebc12e67d34adfd3672ed7f635.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

__global__ void print_details_of_warps()
{
    int gid = blockIdx.x * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
    int warp_id = threadIdx.x / 32;
    int grid_idx = blockIdx.y * gridDim.x + blockIdx.x;

    printf("tid : %d, bid.x : %d, bid.y : %d, gid : %d, warp_id : %d, grid_idx : %d\n", threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, grid_idx);
}

int main(int argc, char** argv){
    dim3 block_size(42);
    dim3 grid_size(2,2);

    print_details_of_warps <<< grid_size,block_size>>>();
    cudaDeviceSynchronize();

    cudaDeviceReset();
    return EXIT_SUCCESS;
}
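One thing worth noting about the pair above: the gid expression uses blockIdx.x in both of its first two terms, so for the 2x2 grid launched in main the printed gid values repeat across blockIdx.y. If a unique linear id per thread is wanted for a two-dimensional grid of one-dimensional blocks, the conventional computation looks like the sketch below; this is an aside for comparison, with names chosen here, not a correction taken from the original tutorial.

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void print_unique_gid()
{
    int block_in_grid = blockIdx.y * gridDim.x + blockIdx.x;   // same quantity as grid_idx above
    int gid = block_in_grid * blockDim.x + threadIdx.x;        // 0 .. (total threads - 1), each value once
    printf("bid.x : %d, bid.y : %d, unique gid : %d\n", blockIdx.x, blockIdx.y, gid);
}

int main()
{
    print_unique_gid<<<dim3(2, 2), 42>>>();
    cudaDeviceSynchronize();
    return 0;
}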
18473bc50611e069944ad3b4b25f8d929adeab6d.hip
// !!! This is a file automatically generated by hipify!!! /* * University of Illinois Open Source License * Copyright 2010 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Elijah Roberts */ #include <cstdio> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define PROF_ENABLE #define PROF_MAX_THREADS 1 #define PROF_MAX_EVENTS 1000 #define PROF_MAX_CUDA_EVENT_BUFFER 1000 #include "lptf/Profile.h" #undef PROF_ENABLE #include "lm/Cuda.h" #include "TimingConstants.h" #define LS_WORDS_PER_SITE 2 #define LS_APRON_SIZE 1 #define LS_X_BLOCK_MAX_X_SIZE 256 #define LS_Y_BLOCK_X_SIZE 32 #define LS_Y_BLOCK_Y_SIZE 4 #define LS_Z_BLOCK_X_SIZE 32 #define LS_Z_BLOCK_Z_SIZE 4 #include "lm/rdme/dev/xor_random_dev.cu" #include "lm/rdme/dev/lattice_sim_1d_dev.cu" #include <hiprand/hiprand_kernel.h> // Allocate the profile space. 
PROF_ALLOC; #define X_SIZE 128 #define Y_SIZE 128 #define Z_SIZE 64 #define NUM_LAUNCHES 100 __global__ void xorshift_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash); __global__ void xorshift_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash); __global__ void xorwow_init_kernel(unsigned int* outLattice, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize, hiprandState_t *rngState); __global__ void xorwow_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, hiprandState_t *state); __global__ void xorwow_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, hiprandState_t *state); int main(int argc, char **argv) { try { PROF_INIT; PROF_BEGIN(PROF_MAIN_RUN); // Allocate the cuda resources. hipStream_t stream; unsigned int * hostOutLattice = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE]; void* outLattice; void* rngState; CUDA_EXCEPTION_CHECK(hipStreamCreate(&stream)); CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(hipMalloc(&rngState, X_SIZE*Y_SIZE*Z_SIZE*sizeof(hiprandState_t))); // Start timings the kernels. PROF_BEGIN(PROF_SUBMIT_KERNELS); // Calculate some properties of the lattice. const unsigned int latticeXSize = X_SIZE; const unsigned int latticeYSize = Y_SIZE; const unsigned int latticeZSize = Z_SIZE; const unsigned int latticeXYSize = X_SIZE*Y_SIZE; const unsigned int latticeXYZSize = X_SIZE*Y_SIZE*Z_SIZE; unsigned int gridXSize; dim3 gridSize, threadBlockSize; if (!calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize)) throw lm::InvalidArgException("Unable to calculate correct launch parameters, the lattice size is incompatible."); // Launch the xorshift kernels. PROF_CUDA_START(stream); for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. unsigned long long hash = (i+1); CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORSHIFT_INT,stream); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((xorshift_int_kernel), dim3(gridSize),dim3(threadBlockSize),0,stream, (unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, hash))); PROF_CUDA_END(PROF_XORSHIFT_INT,stream); } for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. unsigned long long hash = (i+1); CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORSHIFT_FLOAT,stream); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((xorshift_float_kernel), dim3(gridSize),dim3(threadBlockSize),0,stream, (unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, hash))); PROF_CUDA_END(PROF_XORSHIFT_FLOAT,stream); } CUDA_EXCEPTION_CHECK(hipStreamSynchronize(stream)); PROF_CUDA_FINISH(stream); // Initialize the xorwow state. 
//printf("State init\n"); CUDA_EXCEPTION_CHECK(hipThreadSetLimit(hipLimitStackSize, 16384)); PROF_CUDA_START(stream); CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(hipMemset(rngState, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(hiprandState_t))); PROF_CUDA_BEGIN(PROF_XORWOW_INIT,stream); dim3 gridSizeInit(X_SIZE/32,1,1), threadBlockSizeInit(32,1,1); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((xorwow_init_kernel), dim3(gridSizeInit),dim3(threadBlockSizeInit),0,stream, (unsigned int *)outLattice, latticeXSize, latticeYSize, latticeZSize, (hiprandState_t *)rngState))); PROF_CUDA_END(PROF_XORWOW_INIT,stream); CUDA_EXCEPTION_CHECK(hipStreamSynchronize(stream)); PROF_CUDA_FINISH(stream); CUDA_EXCEPTION_CHECK(hipThreadSetLimit(hipLimitStackSize, 1024)); //printf("Done state init\n"); // Make sure the init was done correctly. CUDA_EXCEPTION_CHECK(hipMemcpy(hostOutLattice, outLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost)); /* for (int z=0, i=0; z<3; z++) { printf("%2d--------------------------------------------------------------\n",z); for (int y=0; y<Y_SIZE; y++) { for (int x=0; x<X_SIZE; x++, i++) { printf("%d ",hostOutLattice[i]); } printf("\n"); } } int totalL=0; int totalU=0; for (int z=0, i=0; z<Z_SIZE; z++) { for (int y=0; y<Y_SIZE; y++) { for (int x=0; x<X_SIZE; x++, i++) { totalL += hostOutLattice[i]; totalU += hostOutLattice[i+latticeXYZSize]; } } } printf("Total initialized sites: %d (lower), %d (upper)\n",totalL, totalU); */ // Launch the xorwow kernels. PROF_CUDA_START(stream); for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORWOW_INT,stream); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((xorwow_int_kernel), dim3(gridSize),dim3(threadBlockSize),0,stream, (unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, (hiprandState_t *)rngState))); PROF_CUDA_END(PROF_XORWOW_INT,stream); } for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORWOW_FLOAT,stream); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((xorwow_float_kernel), dim3(gridSize),dim3(threadBlockSize),0,stream, (unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, (hiprandState_t *)rngState))); PROF_CUDA_END(PROF_XORWOW_FLOAT,stream); } CUDA_EXCEPTION_CHECK(hipStreamSynchronize(stream)); PROF_CUDA_FINISH(stream); // Free any resources. CUDA_EXCEPTION_CHECK(hipFree(rngState)); CUDA_EXCEPTION_CHECK(hipFree(outLattice)); CUDA_EXCEPTION_CHECK(hipStreamDestroy(stream)); delete[] hostOutLattice; PROF_END(PROF_SUBMIT_KERNELS); printf("Profile file saved as: %s\n",PROF_MAKE_STR(PROF_OUT_FILE)); PROF_END(PROF_MAIN_RUN); PROF_WRITE; return 0; } catch (std::exception& e) { std::cerr << "Exception during execution: " << e.what() << std::endl; } catch (...) { std::cerr << "Unknown Exception during execution." 
<< std::endl; } PROF_END(PROF_MAIN_RUN); PROF_WRITE; return -1; } __global__ void xorshift_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; int sum=0; for (int i=0; i<16; i++) { unsigned int randomValue = getRandomHash(latticeIndex, 4, i, timestepHash); sum += (randomValue>2147483648)?1:0; } outLattice[latticeIndex] = sum; } __global__ void xorshift_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; int sum=0; for (int i=0; i<16; i++) { float randomValue = getRandomHashFloat(latticeIndex, 4, i, timestepHash); sum += (randomValue>0.5)?1:0; } outLattice[latticeIndex] = sum; } __global__ void xorwow_init_kernel(unsigned int* outLattice, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize, hiprandState_t *rngState) { int latticeXIndex = (blockIdx.x*blockDim.x)+threadIdx.x; for (int latticeZIndex=0; latticeZIndex<latticeZSize; latticeZIndex++) { for (int latticeYIndex=0; latticeYIndex<latticeYSize; latticeYIndex++) { unsigned int latticeIndex = (latticeZIndex*latticeXSize*latticeYSize) + (latticeYIndex*latticeXSize) + latticeXIndex; hiprand_init(1234, latticeIndex, 0, &rngState[latticeIndex]); outLattice[latticeIndex] += 1; } } } __global__ void xorwow_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, hiprandState_t *rngState) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; hiprandState_t localRngState = rngState[latticeIndex]; int sum=0; for (int i=0; i<16; i++) { unsigned int randomValue = hiprand(&localRngState); sum += (randomValue>2147483648)?1:0; } rngState[latticeIndex] = localRngState; outLattice[latticeIndex] = sum; } __global__ void xorwow_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, hiprandState_t *rngState) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. 
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; hiprandState_t localRngState = rngState[latticeIndex]; int sum=0; for (int i=0; i<16; i++) { float randomValue = hiprand_uniform(&localRngState); sum += (randomValue>0.5)?1:0; } rngState[latticeIndex] = localRngState; outLattice[latticeIndex] = sum; }
18473bc50611e069944ad3b4b25f8d929adeab6d.cu
/* * University of Illinois Open Source License * Copyright 2010 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Elijah Roberts */ #include <cstdio> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #define PROF_ENABLE #define PROF_MAX_THREADS 1 #define PROF_MAX_EVENTS 1000 #define PROF_MAX_CUDA_EVENT_BUFFER 1000 #include "lptf/Profile.h" #undef PROF_ENABLE #include "lm/Cuda.h" #include "TimingConstants.h" #define LS_WORDS_PER_SITE 2 #define LS_APRON_SIZE 1 #define LS_X_BLOCK_MAX_X_SIZE 256 #define LS_Y_BLOCK_X_SIZE 32 #define LS_Y_BLOCK_Y_SIZE 4 #define LS_Z_BLOCK_X_SIZE 32 #define LS_Z_BLOCK_Z_SIZE 4 #include "lm/rdme/dev/xor_random_dev.cu" #include "lm/rdme/dev/lattice_sim_1d_dev.cu" #include <curand_kernel.h> // Allocate the profile space. 
PROF_ALLOC; #define X_SIZE 128 #define Y_SIZE 128 #define Z_SIZE 64 #define NUM_LAUNCHES 100 __global__ void xorshift_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash); __global__ void xorshift_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash); __global__ void xorwow_init_kernel(unsigned int* outLattice, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize, curandState *rngState); __global__ void xorwow_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, curandState *state); __global__ void xorwow_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, curandState *state); int main(int argc, char **argv) { try { PROF_INIT; PROF_BEGIN(PROF_MAIN_RUN); // Allocate the cuda resources. cudaStream_t stream; unsigned int * hostOutLattice = new unsigned int[X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE]; void* outLattice; void* rngState; CUDA_EXCEPTION_CHECK(cudaStreamCreate(&stream)); CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(cudaMalloc(&rngState, X_SIZE*Y_SIZE*Z_SIZE*sizeof(curandState))); // Start timings the kernels. PROF_BEGIN(PROF_SUBMIT_KERNELS); // Calculate some properties of the lattice. const unsigned int latticeXSize = X_SIZE; const unsigned int latticeYSize = Y_SIZE; const unsigned int latticeZSize = Z_SIZE; const unsigned int latticeXYSize = X_SIZE*Y_SIZE; const unsigned int latticeXYZSize = X_SIZE*Y_SIZE*Z_SIZE; unsigned int gridXSize; dim3 gridSize, threadBlockSize; if (!calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize)) throw lm::InvalidArgException("Unable to calculate correct launch parameters, the lattice size is incompatible."); // Launch the xorshift kernels. PROF_CUDA_START(stream); for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. unsigned long long hash = (i+1); CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORSHIFT_INT,stream); CUDA_EXCEPTION_EXECUTE((xorshift_int_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, hash))); PROF_CUDA_END(PROF_XORSHIFT_INT,stream); } for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. unsigned long long hash = (i+1); CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORSHIFT_FLOAT,stream); CUDA_EXCEPTION_EXECUTE((xorshift_float_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, hash))); PROF_CUDA_END(PROF_XORSHIFT_FLOAT,stream); } CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(stream)); PROF_CUDA_FINISH(stream); // Initialize the xorwow state. 
//printf("State init\n"); CUDA_EXCEPTION_CHECK(cudaThreadSetLimit(cudaLimitStackSize, 16384)); PROF_CUDA_START(stream); CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(cudaMemset(rngState, 0, X_SIZE*Y_SIZE*Z_SIZE*sizeof(curandState))); PROF_CUDA_BEGIN(PROF_XORWOW_INIT,stream); dim3 gridSizeInit(X_SIZE/32,1,1), threadBlockSizeInit(32,1,1); CUDA_EXCEPTION_EXECUTE((xorwow_init_kernel<<<gridSizeInit,threadBlockSizeInit,0,stream>>>((unsigned int *)outLattice, latticeXSize, latticeYSize, latticeZSize, (curandState *)rngState))); PROF_CUDA_END(PROF_XORWOW_INIT,stream); CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(stream)); PROF_CUDA_FINISH(stream); CUDA_EXCEPTION_CHECK(cudaThreadSetLimit(cudaLimitStackSize, 1024)); //printf("Done state init\n"); // Make sure the init was done correctly. CUDA_EXCEPTION_CHECK(cudaMemcpy(hostOutLattice, outLattice, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost)); /* for (int z=0, i=0; z<3; z++) { printf("%2d--------------------------------------------------------------\n",z); for (int y=0; y<Y_SIZE; y++) { for (int x=0; x<X_SIZE; x++, i++) { printf("%d ",hostOutLattice[i]); } printf("\n"); } } int totalL=0; int totalU=0; for (int z=0, i=0; z<Z_SIZE; z++) { for (int y=0; y<Y_SIZE; y++) { for (int x=0; x<X_SIZE; x++, i++) { totalL += hostOutLattice[i]; totalU += hostOutLattice[i+latticeXYZSize]; } } } printf("Total initialized sites: %d (lower), %d (upper)\n",totalL, totalU); */ // Launch the xorwow kernels. PROF_CUDA_START(stream); for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORWOW_INT,stream); CUDA_EXCEPTION_EXECUTE((xorwow_int_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, (curandState *)rngState))); PROF_CUDA_END(PROF_XORWOW_INT,stream); } for (int i=0; i<NUM_LAUNCHES; i++) { // Execute the kernel. CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0, X_SIZE*Y_SIZE*Z_SIZE*LS_WORDS_PER_SITE*sizeof(unsigned int))); PROF_CUDA_BEGIN(PROF_XORWOW_FLOAT,stream); CUDA_EXCEPTION_EXECUTE((xorwow_float_kernel<<<gridSize,threadBlockSize,0,stream>>>((unsigned int *)outLattice, gridXSize, latticeXSize, latticeXYSize, latticeXYZSize, (curandState *)rngState))); PROF_CUDA_END(PROF_XORWOW_FLOAT,stream); } CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(stream)); PROF_CUDA_FINISH(stream); // Free any resources. CUDA_EXCEPTION_CHECK(cudaFree(rngState)); CUDA_EXCEPTION_CHECK(cudaFree(outLattice)); CUDA_EXCEPTION_CHECK(cudaStreamDestroy(stream)); delete[] hostOutLattice; PROF_END(PROF_SUBMIT_KERNELS); printf("Profile file saved as: %s\n",PROF_MAKE_STR(PROF_OUT_FILE)); PROF_END(PROF_MAIN_RUN); PROF_WRITE; return 0; } catch (std::exception& e) { std::cerr << "Exception during execution: " << e.what() << std::endl; } catch (...) { std::cerr << "Unknown Exception during execution." << std::endl; } PROF_END(PROF_MAIN_RUN); PROF_WRITE; return -1; } __global__ void xorshift_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. 
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; int sum=0; for (int i=0; i<16; i++) { unsigned int randomValue = getRandomHash(latticeIndex, 4, i, timestepHash); sum += (randomValue>2147483648)?1:0; } outLattice[latticeIndex] = sum; } __global__ void xorshift_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, const unsigned long long timestepHash) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; int sum=0; for (int i=0; i<16; i++) { float randomValue = getRandomHashFloat(latticeIndex, 4, i, timestepHash); sum += (randomValue>0.5)?1:0; } outLattice[latticeIndex] = sum; } __global__ void xorwow_init_kernel(unsigned int* outLattice, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize, curandState *rngState) { int latticeXIndex = (blockIdx.x*blockDim.x)+threadIdx.x; for (int latticeZIndex=0; latticeZIndex<latticeZSize; latticeZIndex++) { for (int latticeYIndex=0; latticeYIndex<latticeYSize; latticeYIndex++) { unsigned int latticeIndex = (latticeZIndex*latticeXSize*latticeYSize) + (latticeYIndex*latticeXSize) + latticeXIndex; curand_init(1234, latticeIndex, 0, &rngState[latticeIndex]); outLattice[latticeIndex] += 1; } } } __global__ void xorwow_int_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, curandState *rngState) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; curandState localRngState = rngState[latticeIndex]; int sum=0; for (int i=0; i<16; i++) { unsigned int randomValue = curand(&localRngState); sum += (randomValue>2147483648)?1:0; } rngState[latticeIndex] = localRngState; outLattice[latticeIndex] = sum; } __global__ void xorwow_float_kernel(unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeXYSize, const unsigned int latticeXYZSize, curandState *rngState) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSize) + (by*latticeXSize) + (bx*blockDim.x) + threadIdx.x; curandState localRngState = rngState[latticeIndex]; int sum=0; for (int i=0; i<16; i++) { float randomValue = curand_uniform(&localRngState); sum += (randomValue>0.5)?1:0; } rngState[latticeIndex] = localRngState; outLattice[latticeIndex] = sum; }
c77195286f096e25159bddf89bf992f3391f9f17.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************** filename: gpu/baseline/bfs.cpp author: onesuper email: [email protected] bfs algorithm implemented by CUDA without any optimization. ***********************************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #define MAX_THREAD_PER_BLOCK 1024 #define THREAD_PER_BLOCK 128 #define MAX_LOCAL_QUEUE_SIZE 1200 /* the method is a little different with the naive one it uses two current sets */ __global__ static void bfs_kernel(unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int local_queue[MAX_LOCAL_QUEUE_SIZE]; __shared__ int local_queue_size; __shared__ int new_set_cursor; __syncthreads(); if (threadIdx.x == 0) { new_set_cursor = 0; } __syncthreads(); for(int j=tid; j<(current_set_size+blockDim.x*gridDim.x); j+=blockDim.x*gridDim.x) { __syncthreads(); if (threadIdx.x == 0) { local_queue_size = 0; } __syncthreads(); if (j < current_set_size) { unsigned int index = current_set[j];// fetch one from the current set current_set[j] = 0; // erase it cost[index] = level; Node cur_node = node_list[index]; for (int i=cur_node.start; i < cur_node.start + cur_node.edge_num; i++) { unsigned int id = edge_list[i].dest; if (color[id] == WHITE) { int its_color = atomicExch((int*) &color[id], BLACK); if (its_color == WHITE) { int write_position = atomicAdd((int*) &local_queue_size, 1); local_queue[write_position] = id; //printf("push %d to local queue total:%d \n", id, local_queue_size); } } } } __syncthreads(); if (threadIdx.x == 0) { new_set_cursor = atomicAdd((int*) &(*current_set_size_new), local_queue_size); // printf("new_set_cursor = %d\n", new_set_cursor); } __syncthreads(); for (int i=threadIdx.x; i<local_queue_size; i+=blockDim.x) { new_set[new_set_cursor+i] = local_queue[i]; //printf("push %d to next queue total:%d \n",local_queue[i], *current_set_size_new); } __syncthreads(); } } float bfs(int block_in_a_grid) { struct timeval start, end; float time_used; gettimeofday(&start, 0); // visiting the source node now(CPU) color[source_node_no] = BLACK; current_set[0]= source_node_no; cost[source_node_no] = 0; // synchronize to GPU mem hipMemcpy(d_color, color, sizeof(int) * num_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_current_set_a, current_set, sizeof(unsigned int) * num_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_cost, cost, sizeof(int) * num_of_nodes, hipMemcpyHostToDevice); //hipMemset(d_color, BLACK, sizeof(int)); //hipMemset(d_current_set_a[0], source_node_no, sizeof(unsigned int)); //hipMemset(d_cost[source_node_no], 0, sizeof(int)); int current_set_size = 1; // only source node in it int block_num = block_in_a_grid; int thread_num = THREAD_PER_BLOCK; int level = 0; // used to control the current_set_a/b to visit while(current_set_size != 0) { if (level%2 == 0) { hipMemset(d_current_set_size_new, 0, sizeof(int)); hipLaunchKernelGGL(( bfs_kernel), dim3(block_num), dim3(thread_num), 10000, 0, d_current_set_a, d_current_set_b, current_set_size, d_current_set_size_new, d_node_list, d_edge_list, d_color, d_cost, level); hipDeviceSynchronize(); hipMemcpy(current_set_size_new, d_current_set_size_new, sizeof(int), hipMemcpyDeviceToHost); current_set_size = *current_set_size_new; //printf("%d:%d\n",level,current_set_size); } else 
{ hipMemset(d_current_set_size_new, 0, sizeof(int)); hipLaunchKernelGGL(( bfs_kernel), dim3(block_num), dim3(thread_num), 10000, 0, d_current_set_b, d_current_set_a, current_set_size, d_current_set_size_new, d_node_list, d_edge_list, d_color, d_cost, level); hipDeviceSynchronize(); hipMemcpy(current_set_size_new, d_current_set_size_new, sizeof(int), hipMemcpyDeviceToHost); current_set_size = *current_set_size_new; //printf("%d:%d\n",level,current_set_size); } level++; } // copy the result from GPU to CPU mem hipMemcpy(cost, d_cost, sizeof(unsigned int)*num_of_nodes, hipMemcpyDeviceToHost); // come out the time gettimeofday(&end, 0); time_used = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; time_used /= 1000000; printf("used time: %f\n", time_used); return time_used; }
c77195286f096e25159bddf89bf992f3391f9f17.cu
/********************************************************************** filename: gpu/baseline/bfs.cpp author: onesuper email: [email protected] bfs algorithm implemented by CUDA without any optimization. ***********************************************************************/ #include <cuda.h> #include <stdio.h> #include <sys/time.h> #define MAX_THREAD_PER_BLOCK 1024 #define THREAD_PER_BLOCK 128 #define MAX_LOCAL_QUEUE_SIZE 1200 /* the method is a little different with the naive one it uses two current sets */ __global__ static void bfs_kernel(unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int local_queue[MAX_LOCAL_QUEUE_SIZE]; __shared__ int local_queue_size; __shared__ int new_set_cursor; __syncthreads(); if (threadIdx.x == 0) { new_set_cursor = 0; } __syncthreads(); for(int j=tid; j<(current_set_size+blockDim.x*gridDim.x); j+=blockDim.x*gridDim.x) { __syncthreads(); if (threadIdx.x == 0) { local_queue_size = 0; } __syncthreads(); if (j < current_set_size) { unsigned int index = current_set[j];// fetch one from the current set current_set[j] = 0; // erase it cost[index] = level; Node cur_node = node_list[index]; for (int i=cur_node.start; i < cur_node.start + cur_node.edge_num; i++) { unsigned int id = edge_list[i].dest; if (color[id] == WHITE) { int its_color = atomicExch((int*) &color[id], BLACK); if (its_color == WHITE) { int write_position = atomicAdd((int*) &local_queue_size, 1); local_queue[write_position] = id; //printf("push %d to local queue total:%d \n", id, local_queue_size); } } } } __syncthreads(); if (threadIdx.x == 0) { new_set_cursor = atomicAdd((int*) &(*current_set_size_new), local_queue_size); // printf("new_set_cursor = %d\n", new_set_cursor); } __syncthreads(); for (int i=threadIdx.x; i<local_queue_size; i+=blockDim.x) { new_set[new_set_cursor+i] = local_queue[i]; //printf("push %d to next queue total:%d \n",local_queue[i], *current_set_size_new); } __syncthreads(); } } float bfs(int block_in_a_grid) { struct timeval start, end; float time_used; gettimeofday(&start, 0); // visiting the source node now(CPU) color[source_node_no] = BLACK; current_set[0]= source_node_no; cost[source_node_no] = 0; // synchronize to GPU mem cudaMemcpy(d_color, color, sizeof(int) * num_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_current_set_a, current_set, sizeof(unsigned int) * num_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_cost, cost, sizeof(int) * num_of_nodes, cudaMemcpyHostToDevice); //cudaMemset(d_color, BLACK, sizeof(int)); //cudaMemset(d_current_set_a[0], source_node_no, sizeof(unsigned int)); //cudaMemset(d_cost[source_node_no], 0, sizeof(int)); int current_set_size = 1; // only source node in it int block_num = block_in_a_grid; int thread_num = THREAD_PER_BLOCK; int level = 0; // used to control the current_set_a/b to visit while(current_set_size != 0) { if (level%2 == 0) { cudaMemset(d_current_set_size_new, 0, sizeof(int)); bfs_kernel<<<block_num, thread_num, 10000>>>(d_current_set_a, d_current_set_b, current_set_size, d_current_set_size_new, d_node_list, d_edge_list, d_color, d_cost, level); cudaThreadSynchronize(); cudaMemcpy(current_set_size_new, d_current_set_size_new, sizeof(int), cudaMemcpyDeviceToHost); current_set_size = *current_set_size_new; //printf("%d:%d\n",level,current_set_size); } else { cudaMemset(d_current_set_size_new, 0, sizeof(int)); bfs_kernel<<<block_num, thread_num, 
10000>>>(d_current_set_b, d_current_set_a, current_set_size, d_current_set_size_new, d_node_list, d_edge_list, d_color, d_cost, level); cudaThreadSynchronize(); cudaMemcpy(current_set_size_new, d_current_set_size_new, sizeof(int), cudaMemcpyDeviceToHost); current_set_size = *current_set_size_new; //printf("%d:%d\n",level,current_set_size); } level++; } // copy the result from GPU to CPU mem cudaMemcpy(cost, d_cost, sizeof(unsigned int)*num_of_nodes, cudaMemcpyDeviceToHost); // come out the time gettimeofday(&end, 0); time_used = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec; time_used /= 1000000; printf("used time: %f\n", time_used); return time_used; }
64a0342aa2e5aa3a6db641b3d37c8a3a4526e921.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kDumbSumCols(float* mat, float* vec, unsigned int width, unsigned int height)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    mat += idx;
    if (idx < width) {
        float sum = 0;
        for (int j = 0; j < height; j++) {
            sum += *mat;
            mat += width;
        }
        vec[idx] = sum;
    }
}
64a0342aa2e5aa3a6db641b3d37c8a3a4526e921.cu
#include "includes.h"

__global__ void kDumbSumCols(float* mat, float* vec, unsigned int width, unsigned int height)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    mat += idx;
    if (idx < width) {
        float sum = 0;
        for (int j = 0; j < height; j++) {
            sum += *mat;
            mat += width;
        }
        vec[idx] = sum;
    }
}
ff9a876d15e624354608dbc97802e792c5670099.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// NeuralNetwork.teach.GpuTrainer
extern "C" __global__ void CalculateChangeDeltaAndError(
    double* error, int errorLen0,
    double* inputs, int inputsLen0, int inputsLen1,
    double* previousChangeDelta, int previousChangeDeltaLen0, int previousChangeDeltaLen1,
    double* weights, int weightsLen0, int weightsLen1,
    double* changeDelta, int changeDeltaLen0, int changeDeltaLen1,
    double* backPropError, int backPropErrorLen0, int backPropErrorLen1);

// NeuralNetwork.teach.GpuTrainer
extern "C" __global__ void CalculateChangeDeltaAndError(
    double* error, int errorLen0,
    double* inputs, int inputsLen0, int inputsLen1,
    double* previousChangeDelta, int previousChangeDeltaLen0, int previousChangeDeltaLen1,
    double* weights, int weightsLen0, int weightsLen1,
    double* changeDelta, int changeDeltaLen0, int changeDeltaLen1,
    double* backPropError, int backPropErrorLen0, int backPropErrorLen1)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    if (y < 1024)
    {
        // New change delta: scaled error*input term plus a fraction of the previous delta.
        changeDelta[(y) * changeDeltaLen1 + (x)] =
            error[(y)] * inputs[(y) * inputsLen1 + (x)] * 0.3 +
            previousChangeDelta[(y) * previousChangeDeltaLen1 + (x)] * 0.05;
        // Error propagated back through this layer's weights.
        backPropError[(y) * backPropErrorLen1 + (x)] = error[(y)] * weights[(y) * weightsLen1 + (x)];
    }
}
ff9a876d15e624354608dbc97802e792c5670099.cu
// NeuralNetwork.teach.GpuTrainer
extern "C" __global__ void CalculateChangeDeltaAndError(
    double* error, int errorLen0,
    double* inputs, int inputsLen0, int inputsLen1,
    double* previousChangeDelta, int previousChangeDeltaLen0, int previousChangeDeltaLen1,
    double* weights, int weightsLen0, int weightsLen1,
    double* changeDelta, int changeDeltaLen0, int changeDeltaLen1,
    double* backPropError, int backPropErrorLen0, int backPropErrorLen1);

// NeuralNetwork.teach.GpuTrainer
extern "C" __global__ void CalculateChangeDeltaAndError(
    double* error, int errorLen0,
    double* inputs, int inputsLen0, int inputsLen1,
    double* previousChangeDelta, int previousChangeDeltaLen0, int previousChangeDeltaLen1,
    double* weights, int weightsLen0, int weightsLen1,
    double* changeDelta, int changeDeltaLen0, int changeDeltaLen1,
    double* backPropError, int backPropErrorLen0, int backPropErrorLen1)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    if (y < 1024)
    {
        // New change delta: scaled error*input term plus a fraction of the previous delta.
        changeDelta[(y) * changeDeltaLen1 + (x)] =
            error[(y)] * inputs[(y) * inputsLen1 + (x)] * 0.3 +
            previousChangeDelta[(y) * previousChangeDeltaLen1 + (x)] * 0.05;
        // Error propagated back through this layer's weights.
        backPropError[(y) * backPropErrorLen1 + (x)] = error[(y)] * weights[(y) * weightsLen1 + (x)];
    }
}
b11d648d87f31a48141f19102efa6913e2375c72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void DownsampleForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int downsampled_height, const int downsampled_width, const int kernel_h, const int kernel_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int dw = index % downsampled_width; int dh = (index / downsampled_width) % downsampled_height; int c = (index / downsampled_width / downsampled_height) % channels; int n = index / downsampled_width / downsampled_height / channels; bottom_data += (n * channels + c) * height * width; int w = static_cast<int>(static_cast<float>(dw) * kernel_w); int h = static_cast<int>(static_cast<float>(dh) * kernel_h); w = min(w, width); h = min(h, height); top_data[index] = bottom_data[h * width + w]; } } template <typename Dtype> void DownsamplingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); int count = (*top)[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DownsampleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, downsampled_height_, downsampled_width_, kernel_h_, kernel_w_, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void DownsampleBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int downsampled_height, const int downsampled_width, const int kernel_h, const int kernel_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; top_diff += (n * channels + c) * downsampled_height * downsampled_width; if (w % kernel_w == 0 && h % kernel_h == 0) { int dw = static_cast<int>(static_cast<float>(w) / kernel_w); int dh = static_cast<int>(static_cast<float>(h) / kernel_h); bottom_diff[index] = top_diff[dh * downsampled_width + dw]; } else { bottom_diff[index] = 0; } } } template <typename Dtype> void DownsamplingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); const int count = (*bottom)[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DownsampleBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, downsampled_height_, downsampled_width_, kernel_h_, kernel_w_, bottom_diff); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_CLASS(DownsamplingLayer); } // namespace caffe
b11d648d87f31a48141f19102efa6913e2375c72.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void DownsampleForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int downsampled_height, const int downsampled_width, const int kernel_h, const int kernel_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int dw = index % downsampled_width; int dh = (index / downsampled_width) % downsampled_height; int c = (index / downsampled_width / downsampled_height) % channels; int n = index / downsampled_width / downsampled_height / channels; bottom_data += (n * channels + c) * height * width; int w = static_cast<int>(static_cast<float>(dw) * kernel_w); int h = static_cast<int>(static_cast<float>(dh) * kernel_h); w = min(w, width); h = min(h, height); top_data[index] = bottom_data[h * width + w]; } } template <typename Dtype> void DownsamplingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); int count = (*top)[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) DownsampleForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, downsampled_height_, downsampled_width_, kernel_h_, kernel_w_, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void DownsampleBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int downsampled_height, const int downsampled_width, const int kernel_h, const int kernel_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; top_diff += (n * channels + c) * downsampled_height * downsampled_width; if (w % kernel_w == 0 && h % kernel_h == 0) { int dw = static_cast<int>(static_cast<float>(w) / kernel_w); int dh = static_cast<int>(static_cast<float>(h) / kernel_h); bottom_diff[index] = top_diff[dh * downsampled_width + dw]; } else { bottom_diff[index] = 0; } } } template <typename Dtype> void DownsamplingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); const int count = (*bottom)[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) DownsampleBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, downsampled_height_, downsampled_width_, kernel_h_, kernel_w_, bottom_diff); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_CLASS(DownsamplingLayer); } // namespace caffe
d6df55dfe48b7ad5e692642277cc5429f7e1aed8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "searchsorted_cuda_kernel.h" template <typename scalar_t> __device__ int eval(scalar_t val, scalar_t *a, int64_t row, int64_t col, int64_t ncol, bool side_left) { /* Evaluates whether a[row,col] < val <= a[row, col+1]*/ if (col == ncol - 1) { // special case: we are on the right border if (a[row * ncol + col] <= val){ return 1;} else { return -1;} } bool is_lower; bool is_next_higher; if (side_left) { // a[row, col] < v <= a[row, col+1] is_lower = (a[row * ncol + col] < val); is_next_higher = (a[row*ncol + col + 1] >= val); } else { // a[row, col] <= v < a[row, col+1] is_lower = (a[row * ncol + col] <= val); is_next_higher = (a[row * ncol + col + 1] > val); } if (is_lower && is_next_higher) { // we found the right spot return 0; } else if (is_lower) { // answer is on the right side return 1; } else { // answer is on the left side return -1; } } template <typename scalar_t> __device__ int binary_search(scalar_t *a, int64_t row, scalar_t val, int64_t ncol, bool side_left) { /* Look for the value `val` within row `row` of matrix `a`, which has `ncol` columns. the `a` matrix is assumed sorted in increasing order, row-wise Returns * -1 if `val` is smaller than the smallest value found within that row of `a` * `ncol` - 1 if `val` is larger than the largest element of that row of `a` * Otherwise, return the column index `res` such that: - a[row, col] < val <= a[row, col+1]. (if side_left), or - a[row, col] < val <= a[row, col+1] (if not side_left). */ //start with left at 0 and right at number of columns of a int64_t right = ncol; int64_t left = 0; while (right >= left) { // take the midpoint of current left and right cursors int64_t mid = left + (right-left)/2; // check the relative position of val: are we good here ? int rel_pos = eval(val, a, row, mid, ncol, side_left); // we found the point if(rel_pos == 0) { return mid; } else if (rel_pos > 0) { if (mid==ncol-1){return ncol-1;} // the answer is on the right side left = mid; } else { if (mid==0){return -1;} right = mid; } } return -1; } template <typename scalar_t> __global__ void searchsorted_kernel( int64_t *res, scalar_t *a, scalar_t *v, int64_t nrow_res, int64_t nrow_a, int64_t nrow_v, int64_t ncol_a, int64_t ncol_v, bool side_left) { // get current row and column int64_t row = blockIdx.y*blockDim.y+threadIdx.y; int64_t col = blockIdx.x*blockDim.x+threadIdx.x; // check whether we are outside the bounds of what needs be computed. if ((row >= nrow_res) || (col >= ncol_v)) { return;} // get the value to look for int64_t row_in_v = (nrow_v==1) ? 0: row; int64_t row_in_a = (nrow_a==1) ? 
0: row; int64_t idx_in_v = row_in_v*ncol_v+col; int64_t idx_in_res = row*ncol_v+col; // apply binary search res[idx_in_res] = binary_search(a, row_in_a, v[idx_in_v], ncol_a, side_left)+1; } void searchsorted_cuda( at::Tensor a, at::Tensor v, at::Tensor res, bool side_left){ // Get the dimensions auto nrow_a = a.size(/*dim=*/0); auto nrow_v = v.size(/*dim=*/0); auto ncol_a = a.size(/*dim=*/1); auto ncol_v = v.size(/*dim=*/1); auto nrow_res = fmax(double(nrow_a), double(nrow_v)); // prepare the kernel configuration dim3 threads(ncol_v, nrow_res); dim3 blocks(1, 1); if (nrow_res*ncol_v > 1024){ threads.x = int(fmin(double(1024), double(ncol_v))); threads.y = floor(1024/threads.x); blocks.x = ceil(double(ncol_v)/double(threads.x)); blocks.y = ceil(double(nrow_res)/double(threads.y)); } AT_DISPATCH_ALL_TYPES(a.type(), "searchsorted cuda", ([&] { hipLaunchKernelGGL(( searchsorted_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, res.data<int64_t>(), a.data<scalar_t>(), v.data<scalar_t>(), nrow_res, nrow_a, nrow_v, ncol_a, ncol_v, side_left); })); }
d6df55dfe48b7ad5e692642277cc5429f7e1aed8.cu
#include "searchsorted_cuda_kernel.h" template <typename scalar_t> __device__ int eval(scalar_t val, scalar_t *a, int64_t row, int64_t col, int64_t ncol, bool side_left) { /* Evaluates whether a[row,col] < val <= a[row, col+1]*/ if (col == ncol - 1) { // special case: we are on the right border if (a[row * ncol + col] <= val){ return 1;} else { return -1;} } bool is_lower; bool is_next_higher; if (side_left) { // a[row, col] < v <= a[row, col+1] is_lower = (a[row * ncol + col] < val); is_next_higher = (a[row*ncol + col + 1] >= val); } else { // a[row, col] <= v < a[row, col+1] is_lower = (a[row * ncol + col] <= val); is_next_higher = (a[row * ncol + col + 1] > val); } if (is_lower && is_next_higher) { // we found the right spot return 0; } else if (is_lower) { // answer is on the right side return 1; } else { // answer is on the left side return -1; } } template <typename scalar_t> __device__ int binary_search(scalar_t *a, int64_t row, scalar_t val, int64_t ncol, bool side_left) { /* Look for the value `val` within row `row` of matrix `a`, which has `ncol` columns. the `a` matrix is assumed sorted in increasing order, row-wise Returns * -1 if `val` is smaller than the smallest value found within that row of `a` * `ncol` - 1 if `val` is larger than the largest element of that row of `a` * Otherwise, return the column index `res` such that: - a[row, col] < val <= a[row, col+1]. (if side_left), or - a[row, col] < val <= a[row, col+1] (if not side_left). */ //start with left at 0 and right at number of columns of a int64_t right = ncol; int64_t left = 0; while (right >= left) { // take the midpoint of current left and right cursors int64_t mid = left + (right-left)/2; // check the relative position of val: are we good here ? int rel_pos = eval(val, a, row, mid, ncol, side_left); // we found the point if(rel_pos == 0) { return mid; } else if (rel_pos > 0) { if (mid==ncol-1){return ncol-1;} // the answer is on the right side left = mid; } else { if (mid==0){return -1;} right = mid; } } return -1; } template <typename scalar_t> __global__ void searchsorted_kernel( int64_t *res, scalar_t *a, scalar_t *v, int64_t nrow_res, int64_t nrow_a, int64_t nrow_v, int64_t ncol_a, int64_t ncol_v, bool side_left) { // get current row and column int64_t row = blockIdx.y*blockDim.y+threadIdx.y; int64_t col = blockIdx.x*blockDim.x+threadIdx.x; // check whether we are outside the bounds of what needs be computed. if ((row >= nrow_res) || (col >= ncol_v)) { return;} // get the value to look for int64_t row_in_v = (nrow_v==1) ? 0: row; int64_t row_in_a = (nrow_a==1) ? 
0: row; int64_t idx_in_v = row_in_v*ncol_v+col; int64_t idx_in_res = row*ncol_v+col; // apply binary search res[idx_in_res] = binary_search(a, row_in_a, v[idx_in_v], ncol_a, side_left)+1; } void searchsorted_cuda( at::Tensor a, at::Tensor v, at::Tensor res, bool side_left){ // Get the dimensions auto nrow_a = a.size(/*dim=*/0); auto nrow_v = v.size(/*dim=*/0); auto ncol_a = a.size(/*dim=*/1); auto ncol_v = v.size(/*dim=*/1); auto nrow_res = fmax(double(nrow_a), double(nrow_v)); // prepare the kernel configuration dim3 threads(ncol_v, nrow_res); dim3 blocks(1, 1); if (nrow_res*ncol_v > 1024){ threads.x = int(fmin(double(1024), double(ncol_v))); threads.y = floor(1024/threads.x); blocks.x = ceil(double(ncol_v)/double(threads.x)); blocks.y = ceil(double(nrow_res)/double(threads.y)); } AT_DISPATCH_ALL_TYPES(a.type(), "searchsorted cuda", ([&] { searchsorted_kernel<scalar_t><<<blocks, threads>>>( res.data<int64_t>(), a.data<scalar_t>(), v.data<scalar_t>(), nrow_res, nrow_a, nrow_v, ncol_a, ncol_v, side_left); })); }
851cc201b8bd3d80dba3b816dd4f4f59fd98659e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/codepoint_metadata.ah> #include <text/subword/detail/data_normalizer.hpp> #include <text/subword/detail/tokenizer_utils.cuh> #include <nvtext/detail/load_hash_file.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <stdint.h> #include <algorithm> #include <fstream> #include <iostream> #include <vector> namespace nvtext { namespace detail { /** * @brief Retrieve the code point metadata table. * * Build the code point metadata table in device memory * using the vector pieces from codepoint_metadata.ah */ const codepoint_metadata_type* get_codepoint_metadata(rmm::cuda_stream_view stream) { static cudf::strings::detail::thread_safe_per_context_cache<codepoint_metadata_type> g_codepoint_metadata; return g_codepoint_metadata.find_or_initialize([stream](void) { codepoint_metadata_type* table = static_cast<codepoint_metadata_type*>(rmm::mr::get_current_device_resource()->allocate( codepoint_metadata_size * sizeof(codepoint_metadata_type), stream)); thrust::fill(rmm::exec_policy(stream), table + cp_section1_end, table + codepoint_metadata_size, codepoint_metadata_default_value); CUDA_TRY(hipMemcpyAsync(table, codepoint_metadata, cp_section1_end * sizeof(codepoint_metadata[0]), // 1st section hipMemcpyHostToDevice, stream.value())); CUDA_TRY(hipMemcpyAsync( table + cp_section2_begin, cp_metadata_917505_917999, (cp_section2_end - cp_section2_begin + 1) * sizeof(codepoint_metadata[0]), // 2nd section hipMemcpyHostToDevice, stream.value())); return table; }); } /** * @brief Retrieve the aux code point data table. 
* * Build the aux code point data table in device memory * using the vector pieces from codepoint_metadata.ah */ const aux_codepoint_data_type* get_aux_codepoint_data(rmm::cuda_stream_view stream) { static cudf::strings::detail::thread_safe_per_context_cache<aux_codepoint_data_type> g_aux_codepoint_data; return g_aux_codepoint_data.find_or_initialize([stream](void) { aux_codepoint_data_type* table = static_cast<aux_codepoint_data_type*>(rmm::mr::get_current_device_resource()->allocate( aux_codepoint_data_size * sizeof(aux_codepoint_data_type), stream)); thrust::fill(rmm::exec_policy(stream), table + aux_section1_end, table + aux_codepoint_data_size, aux_codepoint_default_value); CUDA_TRY(hipMemcpyAsync(table, aux_codepoint_data, aux_section1_end * sizeof(aux_codepoint_data[0]), // 1st section hipMemcpyHostToDevice, stream.value())); CUDA_TRY(hipMemcpyAsync( table + aux_section2_begin, aux_cp_data_44032_55203, (aux_section2_end - aux_section2_begin + 1) * sizeof(aux_codepoint_data[0]), // 2nd section hipMemcpyHostToDevice, stream.value())); CUDA_TRY(hipMemcpyAsync( table + aux_section3_begin, aux_cp_data_70475_71099, (aux_section3_end - aux_section3_begin + 1) * sizeof(aux_codepoint_data[0]), // 3rd section hipMemcpyHostToDevice, stream.value())); CUDA_TRY(hipMemcpyAsync( table + aux_section4_begin, aux_cp_data_119134_119232, (aux_section4_end - aux_section4_begin + 1) * sizeof(aux_codepoint_data[0]), // 4th section hipMemcpyHostToDevice, stream.value())); return table; }); } namespace { /** * @brief Convert string to uint32. * * This just wraps the std::stoi but provides a nice error message * in case the hash file format is incorrect. */ uint32_t str_to_uint32(std::string const& str, uint64_t line_no) { try { return std::stoi(str); // there is no std::stoui } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } /** * @brief Convert string to uint64. * * This just wraps the std::stoul but provides a nice error message * in case the hash file format is incorrect. */ uint64_t str_to_uint64(std::string const& str, uint64_t line_no) { try { return std::stoul(str); } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } } // namespace /** * @brief Loads a text file representing the hashed vocabulary into hashed_vocabulary struct. * * @code{.pseudo} * Format of the file (ASCII text file with numbers): * First 3 lines have the following values: * outer_hash_a * outer_hash_b * number-of-bins * The next number-of-bins lines has two values in each line separated by a space * coefficient offset * ... * Next line has the size (number of lines) of the table followed * by the table values -- one value per line. 
* The last three lines: * unknown_token_id * first_token_id * separator_token_id * @endcode * * @param filename_hashed_vocabulary Path to text file containing hashed vocabulary * @return object containing hash table elements for the wordpiece tokenizer */ std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { hashed_vocabulary result; std::ifstream hash_file(filename_hashed_vocabulary); CUDF_EXPECTS(hash_file.good(), "Could not open " + filename_hashed_vocabulary); uint64_t line_no = 1; std::string line; std::getline(hash_file, line); result.outer_hash_a = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.outer_hash_b = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.num_bins = str_to_uint32(line, line_no++); std::vector<uint64_t> bin_coefficients(result.num_bins); std::vector<uint16_t> bin_offsets(result.num_bins); for (int i = 0; i < result.num_bins; ++i) { std::getline(hash_file, line); size_t loc_of_space = line.find(" "); CUDF_EXPECTS(loc_of_space != line.npos, "invalid hash file format"); std::string first_num = line.substr(0, loc_of_space); std::string second_num = line.substr(loc_of_space + 1, line.length()); bin_coefficients[i] = str_to_uint64(first_num, line_no); bin_offsets[i] = str_to_uint32(second_num, line_no); ++line_no; } std::getline(hash_file, line); uint64_t hash_table_length = str_to_uint64(line, line_no++); std::vector<uint64_t> table(hash_table_length); std::generate(table.begin(), table.end(), [&hash_file, &line_no]() { std::string line; std::getline(hash_file, line); return str_to_uint64(line, line_no++); }); std::getline(hash_file, line); result.unknown_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.first_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.separator_token_id = str_to_uint32(line, line_no++); // Transfer hash table to columns result.table = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, table.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDA_TRY(hipMemcpyAsync(result.table->mutable_view().data<uint64_t>(), table.data(), table.size() * sizeof(uint64_t), hipMemcpyHostToDevice, stream.value())); result.bin_coefficients = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, bin_coefficients.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDA_TRY(hipMemcpyAsync(result.bin_coefficients->mutable_view().data<uint64_t>(), bin_coefficients.data(), bin_coefficients.size() * sizeof(uint64_t), hipMemcpyHostToDevice, stream.value())); result.bin_offsets = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT16}, bin_offsets.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDA_TRY(hipMemcpyAsync(result.bin_offsets->mutable_view().data<uint16_t>(), bin_offsets.data(), bin_offsets.size() * sizeof(uint16_t), hipMemcpyHostToDevice, stream.value())); // this just initializes some constant tables into device memory // to help speed up the runtime detail::get_codepoint_metadata(stream); detail::get_aux_codepoint_data(stream); return std::make_unique<hashed_vocabulary>(std::move(result)); } } // namespace detail std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::load_vocabulary_file(filename_hashed_vocabulary, rmm::cuda_stream_default, mr); } } // namespace nvtext
851cc201b8bd3d80dba3b816dd4f4f59fd98659e.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/codepoint_metadata.ah> #include <text/subword/detail/data_normalizer.hpp> #include <text/subword/detail/tokenizer_utils.cuh> #include <nvtext/detail/load_hash_file.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <stdint.h> #include <algorithm> #include <fstream> #include <iostream> #include <vector> namespace nvtext { namespace detail { /** * @brief Retrieve the code point metadata table. * * Build the code point metadata table in device memory * using the vector pieces from codepoint_metadata.ah */ const codepoint_metadata_type* get_codepoint_metadata(rmm::cuda_stream_view stream) { static cudf::strings::detail::thread_safe_per_context_cache<codepoint_metadata_type> g_codepoint_metadata; return g_codepoint_metadata.find_or_initialize([stream](void) { codepoint_metadata_type* table = static_cast<codepoint_metadata_type*>(rmm::mr::get_current_device_resource()->allocate( codepoint_metadata_size * sizeof(codepoint_metadata_type), stream)); thrust::fill(rmm::exec_policy(stream), table + cp_section1_end, table + codepoint_metadata_size, codepoint_metadata_default_value); CUDA_TRY(cudaMemcpyAsync(table, codepoint_metadata, cp_section1_end * sizeof(codepoint_metadata[0]), // 1st section cudaMemcpyHostToDevice, stream.value())); CUDA_TRY(cudaMemcpyAsync( table + cp_section2_begin, cp_metadata_917505_917999, (cp_section2_end - cp_section2_begin + 1) * sizeof(codepoint_metadata[0]), // 2nd section cudaMemcpyHostToDevice, stream.value())); return table; }); } /** * @brief Retrieve the aux code point data table. 
* * Build the aux code point data table in device memory * using the vector pieces from codepoint_metadata.ah */ const aux_codepoint_data_type* get_aux_codepoint_data(rmm::cuda_stream_view stream) { static cudf::strings::detail::thread_safe_per_context_cache<aux_codepoint_data_type> g_aux_codepoint_data; return g_aux_codepoint_data.find_or_initialize([stream](void) { aux_codepoint_data_type* table = static_cast<aux_codepoint_data_type*>(rmm::mr::get_current_device_resource()->allocate( aux_codepoint_data_size * sizeof(aux_codepoint_data_type), stream)); thrust::fill(rmm::exec_policy(stream), table + aux_section1_end, table + aux_codepoint_data_size, aux_codepoint_default_value); CUDA_TRY(cudaMemcpyAsync(table, aux_codepoint_data, aux_section1_end * sizeof(aux_codepoint_data[0]), // 1st section cudaMemcpyHostToDevice, stream.value())); CUDA_TRY(cudaMemcpyAsync( table + aux_section2_begin, aux_cp_data_44032_55203, (aux_section2_end - aux_section2_begin + 1) * sizeof(aux_codepoint_data[0]), // 2nd section cudaMemcpyHostToDevice, stream.value())); CUDA_TRY(cudaMemcpyAsync( table + aux_section3_begin, aux_cp_data_70475_71099, (aux_section3_end - aux_section3_begin + 1) * sizeof(aux_codepoint_data[0]), // 3rd section cudaMemcpyHostToDevice, stream.value())); CUDA_TRY(cudaMemcpyAsync( table + aux_section4_begin, aux_cp_data_119134_119232, (aux_section4_end - aux_section4_begin + 1) * sizeof(aux_codepoint_data[0]), // 4th section cudaMemcpyHostToDevice, stream.value())); return table; }); } namespace { /** * @brief Convert string to uint32. * * This just wraps the std::stoi but provides a nice error message * in case the hash file format is incorrect. */ uint32_t str_to_uint32(std::string const& str, uint64_t line_no) { try { return std::stoi(str); // there is no std::stoui } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } /** * @brief Convert string to uint64. * * This just wraps the std::stoul but provides a nice error message * in case the hash file format is incorrect. */ uint64_t str_to_uint64(std::string const& str, uint64_t line_no) { try { return std::stoul(str); } catch (std::exception const& exc) { std::string message("Line "); message += std::to_string(line_no) + ": "; message += "cannot convert integer from '"; message += str; message += "': "; message += exc.what(); std::cerr << message << std::endl; throw; } } } // namespace /** * @brief Loads a text file representing the hashed vocabulary into hashed_vocabulary struct. * * @code{.pseudo} * Format of the file (ASCII text file with numbers): * First 3 lines have the following values: * outer_hash_a * outer_hash_b * number-of-bins * The next number-of-bins lines has two values in each line separated by a space * coefficient offset * ... * Next line has the size (number of lines) of the table followed * by the table values -- one value per line. 
* The last three lines: * unknown_token_id * first_token_id * separator_token_id * @endcode * * @param filename_hashed_vocabulary Path to text file containing hashed vocabulary * @return object containing hash table elements for the wordpiece tokenizer */ std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { hashed_vocabulary result; std::ifstream hash_file(filename_hashed_vocabulary); CUDF_EXPECTS(hash_file.good(), "Could not open " + filename_hashed_vocabulary); uint64_t line_no = 1; std::string line; std::getline(hash_file, line); result.outer_hash_a = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.outer_hash_b = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.num_bins = str_to_uint32(line, line_no++); std::vector<uint64_t> bin_coefficients(result.num_bins); std::vector<uint16_t> bin_offsets(result.num_bins); for (int i = 0; i < result.num_bins; ++i) { std::getline(hash_file, line); size_t loc_of_space = line.find(" "); CUDF_EXPECTS(loc_of_space != line.npos, "invalid hash file format"); std::string first_num = line.substr(0, loc_of_space); std::string second_num = line.substr(loc_of_space + 1, line.length()); bin_coefficients[i] = str_to_uint64(first_num, line_no); bin_offsets[i] = str_to_uint32(second_num, line_no); ++line_no; } std::getline(hash_file, line); uint64_t hash_table_length = str_to_uint64(line, line_no++); std::vector<uint64_t> table(hash_table_length); std::generate(table.begin(), table.end(), [&hash_file, &line_no]() { std::string line; std::getline(hash_file, line); return str_to_uint64(line, line_no++); }); std::getline(hash_file, line); result.unknown_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.first_token_id = str_to_uint32(line, line_no++); std::getline(hash_file, line); result.separator_token_id = str_to_uint32(line, line_no++); // Transfer hash table to columns result.table = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, table.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDA_TRY(cudaMemcpyAsync(result.table->mutable_view().data<uint64_t>(), table.data(), table.size() * sizeof(uint64_t), cudaMemcpyHostToDevice, stream.value())); result.bin_coefficients = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT64}, bin_coefficients.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDA_TRY(cudaMemcpyAsync(result.bin_coefficients->mutable_view().data<uint64_t>(), bin_coefficients.data(), bin_coefficients.size() * sizeof(uint64_t), cudaMemcpyHostToDevice, stream.value())); result.bin_offsets = cudf::make_numeric_column(cudf::data_type{cudf::type_id::UINT16}, bin_offsets.size(), cudf::mask_state::UNALLOCATED, stream, mr); CUDA_TRY(cudaMemcpyAsync(result.bin_offsets->mutable_view().data<uint16_t>(), bin_offsets.data(), bin_offsets.size() * sizeof(uint16_t), cudaMemcpyHostToDevice, stream.value())); // this just initializes some constant tables into device memory // to help speed up the runtime detail::get_codepoint_metadata(stream); detail::get_aux_codepoint_data(stream); return std::make_unique<hashed_vocabulary>(std::move(result)); } } // namespace detail std::unique_ptr<hashed_vocabulary> load_vocabulary_file( std::string const& filename_hashed_vocabulary, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::load_vocabulary_file(filename_hashed_vocabulary, rmm::cuda_stream_default, mr); } } // namespace nvtext
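The two table builders above share one pattern: allocate the full device table, default-fill it, then overlay only the dense sections that carry real data. A stripped-down sketch of that pattern follows; the function name and its single-section signature are illustrative, not part of cudf.

#include <thrust/fill.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <cudf/utilities/error.hpp>   // CUDA_TRY, as used above

template <typename T>
T* build_sectioned_table(T const* host_section,       // dense host data for one section
                         std::size_t section_begin,   // where the section starts in the table
                         std::size_t section_size,    // number of entries in the section
                         std::size_t table_size,      // total table entries
                         T default_value,
                         rmm::cuda_stream_view stream)
{
  // Allocate the whole table from the current device resource (mirroring the code above;
  // the caller owns the allocation, just as the per-context cache does).
  T* table = static_cast<T*>(
    rmm::mr::get_current_device_resource()->allocate(table_size * sizeof(T), stream));
  // Default-fill everything first ...
  thrust::fill(rmm::exec_policy(stream), table, table + table_size, default_value);
  // ... then copy the one dense section over its slot.
  CUDA_TRY(cudaMemcpyAsync(table + section_begin,
                           host_section,
                           section_size * sizeof(T),
                           cudaMemcpyHostToDevice,
                           stream.value()));
  return table;
}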
50b233a5e1ac82dfa1202cd2c074bc2100e80e28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*----------------------- BEGIN box_model_rates.cu BEGIN ----------------------*/ /* @file box_model_rates.cu */ /* @author charlesj */ /* @date 2014-10-30 17:04:39.049060 */ /* @brief Reaction rate calculation and utility functions */ /* */ /* Reaction rate calculation and utility functions */ /* */ /* This file was generated by Kppa: http://www.paratools.com/Kppa */ /*-----------------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include "box_model_cu_parameters.h" #include "box_model_rates.h" /* BEGIN INLINE declared at /users/charlesj/KPP_BOXMODEL/0-dim_box_model/kppa-0.2.1/box_model.def:323,1 */ __device__ double PHUX(double const X, double const Y, double const Z, double CHI) { double const MINYZ = -30.0; double const EMINYZ = 9.357623e-14; double EYCHIZ, CHIZ, YCHIZ; CHIZ = CHI * Z; if (CHIZ < 1.57079632679489){ YCHIZ = Y * (1.0 - (1.0/ cos(CHIZ))); if (YCHIZ > MINYZ){ EYCHIZ = exp(YCHIZ); } else{ EYCHIZ = EMINYZ; } } else{ EYCHIZ = EMINYZ; } return X * EYCHIZ; } /*----------------------------------- TROE2 -----------------------------------*/ /* @param[in] K0300 None */ /* @param[in] Q None */ /* @param[in] KU300 None */ /* @param[in] R None */ /* @param[in] T None */ /*-----------------------------------------------------------------------------*/ __device__ double TROE2(double const K0300, double const Q, double const KU300, double const R, double const T) { double TT, K0, KU, K0M, KK, LGKK, E, F; double const M = 2.55e19; TT = T / 3.e2; K0 = K0300 / pow(TT,Q); KU = KU300 / pow(TT,R); K0M = K0 * M; KK = K0M / KU; LGKK = 0.434294481 * log(KK); E = 1.0 / (1.0 + LGKK*LGKK); F = pow(0.6,E); return F * K0M / (1. 
+ KK); } /*------------------------------------ EQT ------------------------------------*/ /* @param[in] K0300 None */ /* @param[in] Q None */ /* @param[in] KU300 None */ /* @param[in] R None */ /* @param[in] T None */ /* @param[in] A None */ /* @param[in] B None */ /*-----------------------------------------------------------------------------*/ __device__ double EQT(double const K0300, double const Q, double const KU300, double const R, double const T, double const A, double const B) { double KH; KH = TROE2( K0300, Q, KU300, R, T ); return (KH * A * exp( -B / T )); } /*----------------------------------- EQT2 ------------------------------------*/ /* @param[in] T None */ /*-----------------------------------------------------------------------------*/ __device__ double EQT2(double const T) { double K0300, Q, KU300, R, A, B, KH; K0300 = 1.8e-31; Q = 3.2; KU300 = 4.7e-12; R = 1.4; A = 4.76e+26; B = 10900; KH = TROE2( K0300, Q, KU300, R, T ); return KH * A * exp( -B / T ); } /*----------------------------------- SPEZ ------------------------------------*/ /* @param[in] A0 None */ /* @param[in] B0 None */ /* @param[in] A2 None */ /* @param[in] B2 None */ /* @param[in] A3 None */ /* @param[in] B3 None */ /* @param[in] M None */ /* @param[in] T None */ /*-----------------------------------------------------------------------------*/ __device__ double SPEZ(double const A0, double const B0, double const A2, double const B2, double const A3, double const B3, double const T) { double const M = 2.55e19; double K0, K2, K3; K0 = A0*exp(B0/T); K2 = A2*exp(B2/T); K3 = A3*M*exp(B3/T); return K0 + K3 / ( 1 + K3/K2 ); } /* END INLINE declared at /users/charlesj/KPP_BOXMODEL/0-dim_box_model/kppa-0.2.1/box_model.def:323,1 */ /* Be friendly to Fortran mathmatical intrinsics */ #define SQRT(X) sqrtf(x) #define DSQRT(X) sqrt(x) #define EXP(X) expf(x) #define DEXP(X) exp(x) #define LOG(x) log(x) #define ALOG(X) logf(x) #define DLOG(X) log(x) #define LOG10(x) log10(x) #define ALOG10(X) logf10(x) #define DLOG10(X) log10(x) #define SIN(X) sinf(x) #define DSIN(X) sin(x) #define COS(X) cosf(x) #define DCOS(X) cos(x) #define TAN(X) tanf(x) #define DTAN(X) tan(x) #define POW(X,Y) powf(x, y) #define DPOW(X,Y) pow(x, y) /*------------------------------------ ARR ------------------------------------*/ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] c0 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double ARR(double const a0, double const b0, double const c0, double const temp) { return __dmul_rn(__dmul_rn(a0, pow(__ddiv_rn(temp,(double)300.0), c0)), exp(__ddiv_rn(-b0,temp))); }/* END ARR */ /*------------------------------------ ARR2 -----------------------------------*/ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double ARR2(double const a0, double const b0, double const temp) { return __dmul_rn(a0, exp(__ddiv_rn(b0,temp))); }/* END ARR2 */ /*------------------------------------ EP2 ------------------------------------*/ /* @param[in] a0 None */ /* @param[in] c0 None */ /* @param[in] a2 None */ /* @param[in] c2 None */ /* @param[in] a3 None */ /* @param[in] c3 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double EP2(double const a0, double const c0, double const a2, double const c2, double const a3, 
double const c3, double const temp) { double k0 = __dmul_rn(a0, exp(__ddiv_rn(-c0,temp))); double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp))); double k3 = __dmul_rn(a3, exp(__ddiv_rn(-c3,temp))) * (double)1.0e6*CFACTOR; return __dadd_rn(k0, __ddiv_rn(k3, __dadd_rn((double)1.0, __ddiv_rn(k3,k2)))); }/* END EP2 */ /*------------------------------------ EP3 ------------------------------------*/ /* @param[in] a1 None */ /* @param[in] c1 None */ /* @param[in] a2 None */ /* @param[in] c2 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double EP3(double const a1, double const c1, double const a2, double const c2, double const temp) { double k1 = __dmul_rn(a1, exp(__ddiv_rn(-c1,temp))); double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp))); return __dadd_rn(k1, __dmul_rn(k2, (double)1.0e6*CFACTOR)); }/* END EP3 */ /*------------------------------------ FALL -----------------------------------*/ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] c0 None */ /* @param[in] a1 None */ /* @param[in] b1 None */ /* @param[in] c1 None */ /* @param[in] cf None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double FALL(double const a0, double const b0, double const c0, double const a1, double const b1, double const c1, double const cf, double const temp) { /* Accuracy trumps precision in these calculations */ double k0 = a0 * pow(temp/(double)300.0, c0) * exp(-b0/temp) * (double)1.0e6*CFACTOR; double k1 = k0 / (a1 * pow(temp/(double)300.0, c1) * exp(-b1/temp)); return (k0/((double)1.0+k1)) * pow(cf, (double)1.0/((double)1.0+pow(log10(k1),(double)2.0))); }/* END FALL */ /*---------------------------------- Sunlight ---------------------------------*/ /* Calculates sunlight intensity in the range [0,1] as a function of time. */ /* Modify this routine to get the correct sunlight values for your model. */ /* */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /*-----------------------------------------------------------------------------*/ __device__ double Sunlight(double const time, size_t const idx) { int daysec = 24 * 3600; /* Seconds per day */ float sunrise = 5.5*3600; /* 5:30 local time */ float sunset = 19.5*3600; /* 7:30 local time */ float daily = time - ((int)time / daysec) * daysec; float tmp; /* Estimate sunlight intensity in the range [0,1] */ if ((daily >= sunrise) && (daily <= sunset)) { tmp = __ddiv_rn(2.0*daily - sunrise-sunset, sunset-sunrise); tmp = (tmp > 0) ? tmp * tmp : -tmp * tmp; tmp = 0.5 * (1.0 + cospi(tmp)); } else { tmp = 0.0; } return tmp; }/* END Sunlight */ /*------------------------------------ TROE -----------------------------------*/ /* Troe reactions (Stockwell et. 
al., 1997) */ /* */ /* @param[in] k0_300K None */ /* @param[in] n None */ /* @param[in] kinf_300K None */ /* @param[in] m None */ /* @param[in] temp Temperature */ /* @param[in] cair None */ /*-----------------------------------------------------------------------------*/ __device__ double TROE(double const k0_300K, double const n, double const kinf_300K, double const m, double const temp, double const cair) { double zt_help = 300.0/temp; double k0_T = k0_300K * pow(zt_help, n) * cair; double kinf_T = kinf_300K * pow(zt_help, m); double k_ratio = k0_T/kinf_T; return k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2))); }/* END TROE */ /*----------------------------------- TROEE -----------------------------------*/ /* Troe equilibrium reactions (Stockwell et. al., 1997) */ /* */ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] k0_300K None */ /* @param[in] n None */ /* @param[in] kinf_300K None */ /* @param[in] m None */ /* @param[in] temp Temperature */ /* @param[in] cair None */ /*-----------------------------------------------------------------------------*/ __device__ double TROEE(double const a0, double const b0, double const k0_300K, double const n, double const kinf_300K, double const m, double const temp, double const cair) { double zt_help = 300.0/temp; double k0_T = k0_300K * pow(zt_help,n) * cair; double kinf_T = kinf_300K * pow(zt_help,m); double k_ratio = k0_T/kinf_T; double troe = k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2))); return a0 * exp(-b0 / temp) * troe; }/* END TROEE */ /*-------------------------------- Temperature --------------------------------*/ /* Calculates temperature (kelvin) as a function of time. */ /* Modify this routine to get the correct temperature values for your model. 
*/ /* */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /*-----------------------------------------------------------------------------*/ __device__ double Temperature(double const time, size_t const idx) { float mintemp = 280; /* 280 Kelvin ~= 44 Fahrenheit */ float maxtemp = 300; /* 300 Kelvin ~= 80 Fahrenheit */ float tmp; /* Estimate temperature cycling from mintemp to maxtemp */ tmp = sinpi(__ddiv_rn(time,24*3600)); if (tmp < 0) { tmp = mintemp - tmp * (maxtemp-mintemp); } else { tmp = mintemp + tmp * (maxtemp-mintemp); } return tmp; }/* END Temperature */ /*---------------------------------- d_Rates ----------------------------------*/ /* CUDA kernel for Rates */ /* */ /* @param[in] ncells32 A multiple of 32 grid cells */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /* @param[out] rct Reaction rates */ /*-----------------------------------------------------------------------------*/ __global__ void d_Rates(size_t const ncells32, double const time, size_t const idx, double* rct, double CHI) { /* Sunlight intensity: 0 to 1 inclusive (uppercase for KPP compatibility) */ double SUN; /* Temperature in kelvin (uppercase for KPP compatibility) */ // double TEMP; // SUN = Sunlight(time, idx); // TEMP = Temperature(time, idx); size_t tidx = blockDim.x*blockIdx.x + threadIdx.x; if(tidx < ncells32) { rct += tidx; rct[0] = PHUX(1.07E-02,1.01319E+00,0.83330E+00,CHI); rct[ncells32] = PHUX(3.22E-05,4.45037E+00,0.78028E+00,CHI); rct[2*ncells32] = PHUX(5.36E-04,0.34764E+00,0.91030E+00,CHI); rct[3*ncells32] = PHUX(8.96E-04,0.99438E+00,0.83295E+00,CHI); rct[4*ncells32] = PHUX(5.48E-07,2.86922E+00,0.79561E+00,CHI); rct[5*ncells32] = PHUX(3.90E-06,2.37354E+00,0.79830E+00,CHI)+EQT2(TEMP); rct[6*ncells32] = PHUX(2.74E-02,0.26226E+00,0.92849E+00,CHI); rct[7*ncells32] = PHUX(2.73E-01,0.29327E+00,0.92401E+00,CHI); rct[8*ncells32] = PHUX(7.78E-06,1.91463E+00,0.79810E+00,CHI); rct[9*ncells32] = PHUX(4.92E-05,1.60973E+00,0.80184E+00,CHI); rct[10*ncells32] = PHUX(4.05E-05,2.06917E+00,0.80267E+00,CHI); rct[11*ncells32] = PHUX(5.40E-06,2.52915E+00,0.79722E+00,CHI); rct[12*ncells32] = PHUX(6.37E-06,1.76570E+00,0.80004E+00,CHI); rct[13*ncells32] = PHUX(6.37E-06,1.76570E+00,0.80004E+00,CHI); rct[14*ncells32] = PHUX(6.10E-09,9.17009E+00,0.72585E+00,CHI); rct[15*ncells32] = PHUX(1.32E-05,2.46350E+00,0.79768E+00,CHI); rct[16*ncells32] = PHUX(3.11E-03,0.55016E+00,0.88313E+00,CHI); rct[17*ncells32] = PHUX(3.11E-03,0.55016E+00,0.88313E+00,CHI); rct[18*ncells32] = PHUX(1.85E-03,0.57967E+00,0.87921E+00,CHI); rct[19*ncells32] = PHUX(6.39E-04,1.53712E+00,0.80233E+00,CHI); rct[20*ncells32] = PHUX(7.20E-08,9.11436E+00,0.72600E+00,CHI); rct[21*ncells32] = 2.55e19 * 6.0E-34 * pow((TEMP/300.),(-2.3)); rct[22*ncells32] = 6.50E-12 * exp( 120. / TEMP ); rct[23*ncells32] = 2.00E-11 * exp( 130. / TEMP ); rct[24*ncells32] = 3.20E-11 * exp( 67. / TEMP ); rct[25*ncells32] = 2.14e-10; rct[26*ncells32] = 1.4E-12 * exp( -1310. / TEMP ); rct[27*ncells32] = 1.70E-12 * exp( -940. / TEMP ); rct[28*ncells32] = 1.10E-14 * exp( -500. / TEMP ); rct[29*ncells32] = 3.45E-12 * exp( 270. / TEMP ); rct[30*ncells32] = TROE2( 1.8E-31, 3.2e0, 4.7E-12, 1.4e0, TEMP ); rct[31*ncells32] = 2.2E-13 * exp(620./TEMP) + 1.9E-33 * 2.55e19 * exp(980./TEMP); rct[32*ncells32] = 3.08e-34*exp(2820./TEMP)+2.66e-54*2.55e19*exp(3180./TEMP); rct[33*ncells32] = 3.30E-12 * exp( -200. / TEMP ); rct[34*ncells32] = TROE2( 7.0e-31, 2.6e0, 1.5e-11, 0.5e0, TEMP ); rct[35*ncells32] = 3.30E-39 * exp( 530. 
/ TEMP ); rct[36*ncells32] = 1.40E-13 * exp( -2470. / TEMP ); rct[37*ncells32] = 1.80E-11 * exp( 110. / TEMP ); rct[38*ncells32] = 2.50E-14 * exp( -1230. / TEMP ); rct[39*ncells32] = 2.5e-12; rct[40*ncells32] = TROE2( 2.2e-30, 4.3e0, 1.5e-12, 0.5e0, TEMP ); rct[41*ncells32] = EQT(2.2e-30,4.3e0,1.5e-12,0.5e0,TEMP,9.09e+26,11200.e0); rct[42*ncells32] = 0.0; rct[43*ncells32] = TROE2( 2.6e-30, 3.2e0, 2.4e-11, 1.3e0, TEMP ); rct[44*ncells32] = SPEZ(7.2e-15,785.e0,4.1e-16,1440.e0,1.9e-33,725.e0,TEMP); rct[45*ncells32] = 1.30E-12 * exp( 380. / TEMP ); rct[46*ncells32] = 4.80E-11 * exp( 250. / TEMP ); rct[47*ncells32] = TROE2( 3.0e-31, 3.3e0, 1.5e-12, 0.0e0, TEMP ); rct[48*ncells32] = 2.4329175e-13; rct[49*ncells32] = TEMP * TEMP * 6.95E-18 * exp( -1280. / TEMP ); rct[50*ncells32] = TEMP * TEMP * 1.37E-17 * exp( -444. /TEMP ); rct[51*ncells32] = 1.59E-11 * exp( -540. / TEMP ); rct[52*ncells32] = 1.73E-11 * exp( -380. / TEMP ); rct[53*ncells32] = 3.64E-11 * exp( -380. / TEMP ); rct[54*ncells32] = 2.15E-12 * exp( 411. / TEMP ); rct[55*ncells32] = 5.32E-12 * exp( 504. / TEMP ); rct[56*ncells32] = 1.07E-11 * exp( 549. / TEMP ); rct[57*ncells32] = 2.10E-12 * exp( 322. / TEMP ); rct[58*ncells32] = 1.89E-11 * exp( 116. / TEMP ); rct[59*ncells32] = 4e-11; rct[60*ncells32] = 9e-12; rct[61*ncells32] = 6.87E-12 * exp( 256. / TEMP ); rct[62*ncells32] = 1.20E-11 * exp( -745. / TEMP ); rct[63*ncells32] = 1.15e-11; rct[64*ncells32] = 1.7e-11; rct[65*ncells32] = 2.8e-11; rct[66*ncells32] = 1e-11; rct[67*ncells32] = 1e-11; rct[68*ncells32] = 1e-11; rct[69*ncells32] = TEMP * TEMP *6.85E-18 * exp( -444. / TEMP ); rct[70*ncells32] = 1.55E-11 * exp( -540. / TEMP ); rct[71*ncells32] = 2.55E-11 * exp( 409. / TEMP ); rct[72*ncells32] = 2.6E-12 * exp ( 380. / TEMP); rct[73*ncells32] = 2.E16 * exp (-13500. / TEMP); rct[74*ncells32] = 4.7e-12; rct[75*ncells32] = 1.95E16 * exp(-13543. / TEMP ); rct[76*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[77*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[78*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[79*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[80*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[81*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[82*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[83*ncells32] = 3.50E-11 * exp( -180. / TEMP ); rct[84*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[85*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[86*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[87*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[88*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[89*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[90*ncells32] = 6.00E-13 * exp( -2058. / TEMP ); rct[91*ncells32] = 1.40E-12 * exp( -1900. / TEMP); rct[92*ncells32] = 6.00E-13 * exp( -2058. / TEMP ); rct[93*ncells32] = 1.40E-12 * exp( -1900. / TEMP); rct[94*ncells32] = 1.40E-12 * exp( -1900. / TEMP); rct[95*ncells32] = 2.2e-11; rct[96*ncells32] = 2.00E-12 * exp( -2923. / TEMP ); rct[97*ncells32] = 1.00E-11 * exp( -1895. / TEMP ); rct[98*ncells32] = 3.23E-11 * exp( -975. / TEMP ); rct[99*ncells32] = 5.81e-13; rct[100*ncells32] = 1.20E-14 * exp( -2633. / TEMP ); rct[101*ncells32] = 1.32E-14 * exp( -2105. / TEMP ); rct[102*ncells32] = 7.29E-15 * exp( -1136. / TEMP ); rct[103*ncells32] = 1.23E-14 * exp( -2013. / TEMP ); rct[104*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[105*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[106*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[107*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[108*ncells32] = 7.70E-14 * exp( 1300. 
/ TEMP ); rct[109*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[110*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[111*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[112*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[113*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[114*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[115*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[116*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[117*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[118*ncells32] = 1.90E-13 * exp( 220. / TEMP ); rct[119*ncells32] = 1.40E-13 * exp( 220. / TEMP ); rct[120*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[121*ncells32] = 3.40E-14 * exp( 220. / TEMP ); rct[122*ncells32] = 2.90E-14 * exp( 220. / TEMP ); rct[123*ncells32] = 1.40E-13 * exp( 220. / TEMP ); rct[124*ncells32] = 1.40E-13 * exp( 220. / TEMP ); rct[125*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[126*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[127*ncells32] = 9.60E-13 * exp( 220. / TEMP ); rct[128*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[129*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[130*ncells32] = 9.60E-13 * exp( 220. / TEMP ); rct[131*ncells32] = 3.40E-13 * exp( 220. / TEMP ); rct[132*ncells32] = 1.00E-13 * exp( 220. / TEMP ); rct[133*ncells32] = 8.40E-14 * exp( 220. / TEMP ); rct[134*ncells32] = 7.20E-14 * exp( 220. / TEMP ); rct[135*ncells32] = 3.40E-13 * exp( 220. / TEMP ); rct[136*ncells32] = 3.40E-13 * exp( 220. / TEMP ); rct[137*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[138*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[139*ncells32] = 1.19E-12 * exp( 220. / TEMP ); rct[140*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[141*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[142*ncells32] = 1.19E-12 * exp( 220. / TEMP ); rct[143*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[144*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[145*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[146*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[147*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[148*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[149*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[150*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[151*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[152*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[153*ncells32] = 3.60E-16 * exp( 220. / TEMP ); rct[154*ncells32] = 1.21E-11 * exp( 444. / TEMP ); rct[155*ncells32] = ARR2(1.19E-12,490.,TEMP); rct[156*ncells32] = ARR2(1.01E-15,-736.,TEMP); rct[157*ncells32] = 4e-12; rct[158*ncells32] = 1.5e-11; rct[159*ncells32] = ARR2(3.56E-14,708.,TEMP); rct[160*ncells32] = ARR2(7.40E-13,765.,TEMP); rct[161*ncells32] = 1.2e-12; rct[162*ncells32] = 1.7e-10; rct[163*ncells32] = 1.22e-11; rct[164*ncells32] = 2e-16; rct[165*ncells32] = 4e-12; rct[166*ncells32] = 1.5e-11; rct[167*ncells32] = ARR2(3.56E-14,708.,TEMP); rct[168*ncells32] = ARR2(7.40E-13,765.,TEMP); rct[169*ncells32] = 1.2e-12; rct[170*ncells32] = ARR2(2.43E-12,360.,TEMP); rct[171*ncells32] = ARR2(2.05E-13,1300.,TEMP); rct[172*ncells32] = 2e-12; rct[173*ncells32] = 1e-10; rct[174*ncells32] = 1.3e-11; rct[175*ncells32] = 0.5*(4.13E-12 * exp( 452. / TEMP ) + 1.86E-11 * exp( 175. / TEMP )); rct[176*ncells32] = 0.5*(1.36E-15 * exp( -2112. / TEMP ) + 7.51E-16 * exp( -1521. 
/ TEMP )); rct[177*ncells32] = ARR2(2.54E-12,360.,TEMP); rct[178*ncells32] = ARR2(1.82E-13,1300.,TEMP); rct[179*ncells32] = 2e-12; rct[180*ncells32] = TROE2( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, TEMP ); rct[181*ncells32] = TROE2( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, TEMP )/(ARR2(9.0E-19,14000.,TEMP)); rct[182*ncells32] = 3.6e-12; rct[183*ncells32] = 3e-11; rct[184*ncells32] = 3e-12; rct[185*ncells32] = ARR2(5.60E-12,270.,TEMP); rct[186*ncells32] = ARR2(1.9E-13,500.,TEMP); rct[187*ncells32] = ARR2(9.6E-12,-234.,TEMP); rct[188*ncells32] = ARR2(3.04E-12,350.,TEMP)*ARR2(1.106E-31,7460.,TEMP)*2.55e19/(1+ARR2(1.106E-31,7460.,TEMP)*2.55e19); rct[189*ncells32] = 5.8e-11; rct[190*ncells32] = 2.5e-12; rct[191*ncells32] = 2.5e-12; rct[192*ncells32] = 2.5e-12; rct[193*ncells32] = 0.0; } }/* END d_Rates */ /*----------------------------------- Rates -----------------------------------*/ /* Calculates reaction rate coefficients */ /* */ /* @param[in] ncells32 A multiple of 32 grid cells */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /* @param[out] d_rct Reaction rates in device memory */ /*-----------------------------------------------------------------------------*/ void Rates(size_t const ncells32, double const time, size_t const idx, double* d_rct, double CHI) { size_t nBlocks = ((ncells32 + 127) & ~127) >> 7; size_t nThreads = 128; hipLaunchKernelGGL(( d_Rates), dim3(nBlocks), dim3(nThreads), 0, 0, ncells32, time, idx, d_rct, CHI); }/* END Rates */ /*------------------------- END box_model_rates.h END -------------------------*/
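Written out, the fall-off expression that TROE2 above evaluates; this is a direct restatement of the code, with the hard-coded air density M = 2.55e19 molecules cm^-3.

\[
k(T) \;=\; \frac{k_0(T)\,M}{1 + \dfrac{k_0(T)\,M}{k_\infty(T)}}\;
0.6^{\left[\,1+\left(\log_{10}\dfrac{k_0(T)\,M}{k_\infty(T)}\right)^{2}\right]^{-1}},
\qquad
k_0(T) = k_0^{300}\left(\frac{T}{300}\right)^{-Q},\quad
k_\infty(T) = k_\infty^{300}\left(\frac{T}{300}\right)^{-R}.
\]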
50b233a5e1ac82dfa1202cd2c074bc2100e80e28.cu
/*----------------------- BEGIN box_model_rates.cu BEGIN ----------------------*/ /* @file box_model_rates.cu */ /* @author charlesj */ /* @date 2014-10-30 17:04:39.049060 */ /* @brief Reaction rate calculation and utility functions */ /* */ /* Reaction rate calculation and utility functions */ /* */ /* This file was generated by Kppa: http://www.paratools.com/Kppa */ /*-----------------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include "box_model_cu_parameters.h" #include "box_model_rates.h" /* BEGIN INLINE declared at /users/charlesj/KPP_BOXMODEL/0-dim_box_model/kppa-0.2.1/box_model.def:323,1 */ __device__ double PHUX(double const X, double const Y, double const Z, double CHI) { double const MINYZ = -30.0; double const EMINYZ = 9.357623e-14; double EYCHIZ, CHIZ, YCHIZ; CHIZ = CHI * Z; if (CHIZ < 1.57079632679489){ YCHIZ = Y * (1.0 - (1.0/ cos(CHIZ))); if (YCHIZ > MINYZ){ EYCHIZ = exp(YCHIZ); } else{ EYCHIZ = EMINYZ; } } else{ EYCHIZ = EMINYZ; } return X * EYCHIZ; } /*----------------------------------- TROE2 -----------------------------------*/ /* @param[in] K0300 None */ /* @param[in] Q None */ /* @param[in] KU300 None */ /* @param[in] R None */ /* @param[in] T None */ /*-----------------------------------------------------------------------------*/ __device__ double TROE2(double const K0300, double const Q, double const KU300, double const R, double const T) { double TT, K0, KU, K0M, KK, LGKK, E, F; double const M = 2.55e19; TT = T / 3.e2; K0 = K0300 / pow(TT,Q); KU = KU300 / pow(TT,R); K0M = K0 * M; KK = K0M / KU; LGKK = 0.434294481 * log(KK); E = 1.0 / (1.0 + LGKK*LGKK); F = pow(0.6,E); return F * K0M / (1. + KK); } /*------------------------------------ EQT ------------------------------------*/ /* @param[in] K0300 None */ /* @param[in] Q None */ /* @param[in] KU300 None */ /* @param[in] R None */ /* @param[in] T None */ /* @param[in] A None */ /* @param[in] B None */ /*-----------------------------------------------------------------------------*/ __device__ double EQT(double const K0300, double const Q, double const KU300, double const R, double const T, double const A, double const B) { double KH; KH = TROE2( K0300, Q, KU300, R, T ); return (KH * A * exp( -B / T )); } /*----------------------------------- EQT2 ------------------------------------*/ /* @param[in] T None */ /*-----------------------------------------------------------------------------*/ __device__ double EQT2(double const T) { double K0300, Q, KU300, R, A, B, KH; K0300 = 1.8e-31; Q = 3.2; KU300 = 4.7e-12; R = 1.4; A = 4.76e+26; B = 10900; KH = TROE2( K0300, Q, KU300, R, T ); return KH * A * exp( -B / T ); } /*----------------------------------- SPEZ ------------------------------------*/ /* @param[in] A0 None */ /* @param[in] B0 None */ /* @param[in] A2 None */ /* @param[in] B2 None */ /* @param[in] A3 None */ /* @param[in] B3 None */ /* @param[in] M None */ /* @param[in] T None */ /*-----------------------------------------------------------------------------*/ __device__ double SPEZ(double const A0, double const B0, double const A2, double const B2, double const A3, double const B3, double const T) { double const M = 2.55e19; double K0, K2, K3; K0 = A0*exp(B0/T); K2 = A2*exp(B2/T); K3 = A3*M*exp(B3/T); return K0 + K3 / ( 1 + K3/K2 ); } /* END INLINE declared at /users/charlesj/KPP_BOXMODEL/0-dim_box_model/kppa-0.2.1/box_model.def:323,1 */ /* Be friendly to Fortran mathmatical intrinsics */ 
#define SQRT(X) sqrtf(x) #define DSQRT(X) sqrt(x) #define EXP(X) expf(x) #define DEXP(X) exp(x) #define LOG(x) log(x) #define ALOG(X) logf(x) #define DLOG(X) log(x) #define LOG10(x) log10(x) #define ALOG10(X) logf10(x) #define DLOG10(X) log10(x) #define SIN(X) sinf(x) #define DSIN(X) sin(x) #define COS(X) cosf(x) #define DCOS(X) cos(x) #define TAN(X) tanf(x) #define DTAN(X) tan(x) #define POW(X,Y) powf(x, y) #define DPOW(X,Y) pow(x, y) /*------------------------------------ ARR ------------------------------------*/ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] c0 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double ARR(double const a0, double const b0, double const c0, double const temp) { return __dmul_rn(__dmul_rn(a0, pow(__ddiv_rn(temp,(double)300.0), c0)), exp(__ddiv_rn(-b0,temp))); }/* END ARR */ /*------------------------------------ ARR2 -----------------------------------*/ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double ARR2(double const a0, double const b0, double const temp) { return __dmul_rn(a0, exp(__ddiv_rn(b0,temp))); }/* END ARR2 */ /*------------------------------------ EP2 ------------------------------------*/ /* @param[in] a0 None */ /* @param[in] c0 None */ /* @param[in] a2 None */ /* @param[in] c2 None */ /* @param[in] a3 None */ /* @param[in] c3 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double EP2(double const a0, double const c0, double const a2, double const c2, double const a3, double const c3, double const temp) { double k0 = __dmul_rn(a0, exp(__ddiv_rn(-c0,temp))); double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp))); double k3 = __dmul_rn(a3, exp(__ddiv_rn(-c3,temp))) * (double)1.0e6*CFACTOR; return __dadd_rn(k0, __ddiv_rn(k3, __dadd_rn((double)1.0, __ddiv_rn(k3,k2)))); }/* END EP2 */ /*------------------------------------ EP3 ------------------------------------*/ /* @param[in] a1 None */ /* @param[in] c1 None */ /* @param[in] a2 None */ /* @param[in] c2 None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double EP3(double const a1, double const c1, double const a2, double const c2, double const temp) { double k1 = __dmul_rn(a1, exp(__ddiv_rn(-c1,temp))); double k2 = __dmul_rn(a2, exp(__ddiv_rn(-c2,temp))); return __dadd_rn(k1, __dmul_rn(k2, (double)1.0e6*CFACTOR)); }/* END EP3 */ /*------------------------------------ FALL -----------------------------------*/ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] c0 None */ /* @param[in] a1 None */ /* @param[in] b1 None */ /* @param[in] c1 None */ /* @param[in] cf None */ /* @param[in] temp Temperature */ /*-----------------------------------------------------------------------------*/ __device__ double FALL(double const a0, double const b0, double const c0, double const a1, double const b1, double const c1, double const cf, double const temp) { /* Accuracy trumps precision in these calculations */ double k0 = a0 * pow(temp/(double)300.0, c0) * exp(-b0/temp) * (double)1.0e6*CFACTOR; double k1 = k0 / (a1 * pow(temp/(double)300.0, c1) * exp(-b1/temp)); return (k0/((double)1.0+k1)) * pow(cf, (double)1.0/((double)1.0+pow(log10(k1),(double)2.0))); }/* END FALL */ 
/*---------------------------------- Sunlight ---------------------------------*/ /* Calculates sunlight intensity in the range [0,1] as a function of time. */ /* Modify this routine to get the correct sunlight values for your model. */ /* */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /*-----------------------------------------------------------------------------*/ __device__ double Sunlight(double const time, size_t const idx) { int daysec = 24 * 3600; /* Seconds per day */ float sunrise = 5.5*3600; /* 5:30 local time */ float sunset = 19.5*3600; /* 7:30 local time */ float daily = time - ((int)time / daysec) * daysec; float tmp; /* Estimate sunlight intensity in the range [0,1] */ if ((daily >= sunrise) && (daily <= sunset)) { tmp = __ddiv_rn(2.0*daily - sunrise-sunset, sunset-sunrise); tmp = (tmp > 0) ? tmp * tmp : -tmp * tmp; tmp = 0.5 * (1.0 + cospi(tmp)); } else { tmp = 0.0; } return tmp; }/* END Sunlight */ /*------------------------------------ TROE -----------------------------------*/ /* Troe reactions (Stockwell et. al., 1997) */ /* */ /* @param[in] k0_300K None */ /* @param[in] n None */ /* @param[in] kinf_300K None */ /* @param[in] m None */ /* @param[in] temp Temperature */ /* @param[in] cair None */ /*-----------------------------------------------------------------------------*/ __device__ double TROE(double const k0_300K, double const n, double const kinf_300K, double const m, double const temp, double const cair) { double zt_help = 300.0/temp; double k0_T = k0_300K * pow(zt_help, n) * cair; double kinf_T = kinf_300K * pow(zt_help, m); double k_ratio = k0_T/kinf_T; return k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2))); }/* END TROE */ /*----------------------------------- TROEE -----------------------------------*/ /* Troe equilibrium reactions (Stockwell et. al., 1997) */ /* */ /* @param[in] a0 None */ /* @param[in] b0 None */ /* @param[in] k0_300K None */ /* @param[in] n None */ /* @param[in] kinf_300K None */ /* @param[in] m None */ /* @param[in] temp Temperature */ /* @param[in] cair None */ /*-----------------------------------------------------------------------------*/ __device__ double TROEE(double const a0, double const b0, double const k0_300K, double const n, double const kinf_300K, double const m, double const temp, double const cair) { double zt_help = 300.0/temp; double k0_T = k0_300K * pow(zt_help,n) * cair; double kinf_T = kinf_300K * pow(zt_help,m); double k_ratio = k0_T/kinf_T; double troe = k0_T / (1.0 + k_ratio) * pow(0.6, 1.0/(1.0+pow(log10(k_ratio), 2))); return a0 * exp(-b0 / temp) * troe; }/* END TROEE */ /*-------------------------------- Temperature --------------------------------*/ /* Calculates temperature (kelvin) as a function of time. */ /* Modify this routine to get the correct temperature values for your model. 
*/ /* */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /*-----------------------------------------------------------------------------*/ __device__ double Temperature(double const time, size_t const idx) { float mintemp = 280; /* 280 Kelvin ~= 44 Fahrenheit */ float maxtemp = 300; /* 300 Kelvin ~= 80 Fahrenheit */ float tmp; /* Estimate temperature cycling from mintemp to maxtemp */ tmp = sinpi(__ddiv_rn(time,24*3600)); if (tmp < 0) { tmp = mintemp - tmp * (maxtemp-mintemp); } else { tmp = mintemp + tmp * (maxtemp-mintemp); } return tmp; }/* END Temperature */ /*---------------------------------- d_Rates ----------------------------------*/ /* CUDA kernel for Rates */ /* */ /* @param[in] ncells32 A multiple of 32 grid cells */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /* @param[out] rct Reaction rates */ /*-----------------------------------------------------------------------------*/ __global__ void d_Rates(size_t const ncells32, double const time, size_t const idx, double* rct, double CHI) { /* Sunlight intensity: 0 to 1 inclusive (uppercase for KPP compatibility) */ double SUN; /* Temperature in kelvin (uppercase for KPP compatibility) */ // double TEMP; // SUN = Sunlight(time, idx); // TEMP = Temperature(time, idx); size_t tidx = blockDim.x*blockIdx.x + threadIdx.x; if(tidx < ncells32) { rct += tidx; rct[0] = PHUX(1.07E-02,1.01319E+00,0.83330E+00,CHI); rct[ncells32] = PHUX(3.22E-05,4.45037E+00,0.78028E+00,CHI); rct[2*ncells32] = PHUX(5.36E-04,0.34764E+00,0.91030E+00,CHI); rct[3*ncells32] = PHUX(8.96E-04,0.99438E+00,0.83295E+00,CHI); rct[4*ncells32] = PHUX(5.48E-07,2.86922E+00,0.79561E+00,CHI); rct[5*ncells32] = PHUX(3.90E-06,2.37354E+00,0.79830E+00,CHI)+EQT2(TEMP); rct[6*ncells32] = PHUX(2.74E-02,0.26226E+00,0.92849E+00,CHI); rct[7*ncells32] = PHUX(2.73E-01,0.29327E+00,0.92401E+00,CHI); rct[8*ncells32] = PHUX(7.78E-06,1.91463E+00,0.79810E+00,CHI); rct[9*ncells32] = PHUX(4.92E-05,1.60973E+00,0.80184E+00,CHI); rct[10*ncells32] = PHUX(4.05E-05,2.06917E+00,0.80267E+00,CHI); rct[11*ncells32] = PHUX(5.40E-06,2.52915E+00,0.79722E+00,CHI); rct[12*ncells32] = PHUX(6.37E-06,1.76570E+00,0.80004E+00,CHI); rct[13*ncells32] = PHUX(6.37E-06,1.76570E+00,0.80004E+00,CHI); rct[14*ncells32] = PHUX(6.10E-09,9.17009E+00,0.72585E+00,CHI); rct[15*ncells32] = PHUX(1.32E-05,2.46350E+00,0.79768E+00,CHI); rct[16*ncells32] = PHUX(3.11E-03,0.55016E+00,0.88313E+00,CHI); rct[17*ncells32] = PHUX(3.11E-03,0.55016E+00,0.88313E+00,CHI); rct[18*ncells32] = PHUX(1.85E-03,0.57967E+00,0.87921E+00,CHI); rct[19*ncells32] = PHUX(6.39E-04,1.53712E+00,0.80233E+00,CHI); rct[20*ncells32] = PHUX(7.20E-08,9.11436E+00,0.72600E+00,CHI); rct[21*ncells32] = 2.55e19 * 6.0E-34 * pow((TEMP/300.),(-2.3)); rct[22*ncells32] = 6.50E-12 * exp( 120. / TEMP ); rct[23*ncells32] = 2.00E-11 * exp( 130. / TEMP ); rct[24*ncells32] = 3.20E-11 * exp( 67. / TEMP ); rct[25*ncells32] = 2.14e-10; rct[26*ncells32] = 1.4E-12 * exp( -1310. / TEMP ); rct[27*ncells32] = 1.70E-12 * exp( -940. / TEMP ); rct[28*ncells32] = 1.10E-14 * exp( -500. / TEMP ); rct[29*ncells32] = 3.45E-12 * exp( 270. / TEMP ); rct[30*ncells32] = TROE2( 1.8E-31, 3.2e0, 4.7E-12, 1.4e0, TEMP ); rct[31*ncells32] = 2.2E-13 * exp(620./TEMP) + 1.9E-33 * 2.55e19 * exp(980./TEMP); rct[32*ncells32] = 3.08e-34*exp(2820./TEMP)+2.66e-54*2.55e19*exp(3180./TEMP); rct[33*ncells32] = 3.30E-12 * exp( -200. / TEMP ); rct[34*ncells32] = TROE2( 7.0e-31, 2.6e0, 1.5e-11, 0.5e0, TEMP ); rct[35*ncells32] = 3.30E-39 * exp( 530. 
/ TEMP ); rct[36*ncells32] = 1.40E-13 * exp( -2470. / TEMP ); rct[37*ncells32] = 1.80E-11 * exp( 110. / TEMP ); rct[38*ncells32] = 2.50E-14 * exp( -1230. / TEMP ); rct[39*ncells32] = 2.5e-12; rct[40*ncells32] = TROE2( 2.2e-30, 4.3e0, 1.5e-12, 0.5e0, TEMP ); rct[41*ncells32] = EQT(2.2e-30,4.3e0,1.5e-12,0.5e0,TEMP,9.09e+26,11200.e0); rct[42*ncells32] = 0.0; rct[43*ncells32] = TROE2( 2.6e-30, 3.2e0, 2.4e-11, 1.3e0, TEMP ); rct[44*ncells32] = SPEZ(7.2e-15,785.e0,4.1e-16,1440.e0,1.9e-33,725.e0,TEMP); rct[45*ncells32] = 1.30E-12 * exp( 380. / TEMP ); rct[46*ncells32] = 4.80E-11 * exp( 250. / TEMP ); rct[47*ncells32] = TROE2( 3.0e-31, 3.3e0, 1.5e-12, 0.0e0, TEMP ); rct[48*ncells32] = 2.4329175e-13; rct[49*ncells32] = TEMP * TEMP * 6.95E-18 * exp( -1280. / TEMP ); rct[50*ncells32] = TEMP * TEMP * 1.37E-17 * exp( -444. /TEMP ); rct[51*ncells32] = 1.59E-11 * exp( -540. / TEMP ); rct[52*ncells32] = 1.73E-11 * exp( -380. / TEMP ); rct[53*ncells32] = 3.64E-11 * exp( -380. / TEMP ); rct[54*ncells32] = 2.15E-12 * exp( 411. / TEMP ); rct[55*ncells32] = 5.32E-12 * exp( 504. / TEMP ); rct[56*ncells32] = 1.07E-11 * exp( 549. / TEMP ); rct[57*ncells32] = 2.10E-12 * exp( 322. / TEMP ); rct[58*ncells32] = 1.89E-11 * exp( 116. / TEMP ); rct[59*ncells32] = 4e-11; rct[60*ncells32] = 9e-12; rct[61*ncells32] = 6.87E-12 * exp( 256. / TEMP ); rct[62*ncells32] = 1.20E-11 * exp( -745. / TEMP ); rct[63*ncells32] = 1.15e-11; rct[64*ncells32] = 1.7e-11; rct[65*ncells32] = 2.8e-11; rct[66*ncells32] = 1e-11; rct[67*ncells32] = 1e-11; rct[68*ncells32] = 1e-11; rct[69*ncells32] = TEMP * TEMP *6.85E-18 * exp( -444. / TEMP ); rct[70*ncells32] = 1.55E-11 * exp( -540. / TEMP ); rct[71*ncells32] = 2.55E-11 * exp( 409. / TEMP ); rct[72*ncells32] = 2.6E-12 * exp ( 380. / TEMP); rct[73*ncells32] = 2.E16 * exp (-13500. / TEMP); rct[74*ncells32] = 4.7e-12; rct[75*ncells32] = 1.95E16 * exp(-13543. / TEMP ); rct[76*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[77*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[78*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[79*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[80*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[81*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[82*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[83*ncells32] = 3.50E-11 * exp( -180. / TEMP ); rct[84*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[85*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[86*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[87*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[88*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[89*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[90*ncells32] = 6.00E-13 * exp( -2058. / TEMP ); rct[91*ncells32] = 1.40E-12 * exp( -1900. / TEMP); rct[92*ncells32] = 6.00E-13 * exp( -2058. / TEMP ); rct[93*ncells32] = 1.40E-12 * exp( -1900. / TEMP); rct[94*ncells32] = 1.40E-12 * exp( -1900. / TEMP); rct[95*ncells32] = 2.2e-11; rct[96*ncells32] = 2.00E-12 * exp( -2923. / TEMP ); rct[97*ncells32] = 1.00E-11 * exp( -1895. / TEMP ); rct[98*ncells32] = 3.23E-11 * exp( -975. / TEMP ); rct[99*ncells32] = 5.81e-13; rct[100*ncells32] = 1.20E-14 * exp( -2633. / TEMP ); rct[101*ncells32] = 1.32E-14 * exp( -2105. / TEMP ); rct[102*ncells32] = 7.29E-15 * exp( -1136. / TEMP ); rct[103*ncells32] = 1.23E-14 * exp( -2013. / TEMP ); rct[104*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[105*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[106*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[107*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[108*ncells32] = 7.70E-14 * exp( 1300. 
/ TEMP ); rct[109*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[110*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[111*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[112*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[113*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[114*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[115*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[116*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[117*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[118*ncells32] = 1.90E-13 * exp( 220. / TEMP ); rct[119*ncells32] = 1.40E-13 * exp( 220. / TEMP ); rct[120*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[121*ncells32] = 3.40E-14 * exp( 220. / TEMP ); rct[122*ncells32] = 2.90E-14 * exp( 220. / TEMP ); rct[123*ncells32] = 1.40E-13 * exp( 220. / TEMP ); rct[124*ncells32] = 1.40E-13 * exp( 220. / TEMP ); rct[125*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[126*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[127*ncells32] = 9.60E-13 * exp( 220. / TEMP ); rct[128*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[129*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[130*ncells32] = 9.60E-13 * exp( 220. / TEMP ); rct[131*ncells32] = 3.40E-13 * exp( 220. / TEMP ); rct[132*ncells32] = 1.00E-13 * exp( 220. / TEMP ); rct[133*ncells32] = 8.40E-14 * exp( 220. / TEMP ); rct[134*ncells32] = 7.20E-14 * exp( 220. / TEMP ); rct[135*ncells32] = 3.40E-13 * exp( 220. / TEMP ); rct[136*ncells32] = 3.40E-13 * exp( 220. / TEMP ); rct[137*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[138*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[139*ncells32] = 1.19E-12 * exp( 220. / TEMP ); rct[140*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[141*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[142*ncells32] = 1.19E-12 * exp( 220. / TEMP ); rct[143*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[144*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[145*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[146*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[147*ncells32] = 4.20E-12 * exp( 180. / TEMP ); rct[148*ncells32] = 7.70E-14 * exp( 1300. / TEMP ); rct[149*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[150*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[151*ncells32] = 1.70E-14 * exp( 220. / TEMP ); rct[152*ncells32] = 4.20E-14 * exp( 220. / TEMP ); rct[153*ncells32] = 3.60E-16 * exp( 220. / TEMP ); rct[154*ncells32] = 1.21E-11 * exp( 444. / TEMP ); rct[155*ncells32] = ARR2(1.19E-12,490.,TEMP); rct[156*ncells32] = ARR2(1.01E-15,-736.,TEMP); rct[157*ncells32] = 4e-12; rct[158*ncells32] = 1.5e-11; rct[159*ncells32] = ARR2(3.56E-14,708.,TEMP); rct[160*ncells32] = ARR2(7.40E-13,765.,TEMP); rct[161*ncells32] = 1.2e-12; rct[162*ncells32] = 1.7e-10; rct[163*ncells32] = 1.22e-11; rct[164*ncells32] = 2e-16; rct[165*ncells32] = 4e-12; rct[166*ncells32] = 1.5e-11; rct[167*ncells32] = ARR2(3.56E-14,708.,TEMP); rct[168*ncells32] = ARR2(7.40E-13,765.,TEMP); rct[169*ncells32] = 1.2e-12; rct[170*ncells32] = ARR2(2.43E-12,360.,TEMP); rct[171*ncells32] = ARR2(2.05E-13,1300.,TEMP); rct[172*ncells32] = 2e-12; rct[173*ncells32] = 1e-10; rct[174*ncells32] = 1.3e-11; rct[175*ncells32] = 0.5*(4.13E-12 * exp( 452. / TEMP ) + 1.86E-11 * exp( 175. / TEMP )); rct[176*ncells32] = 0.5*(1.36E-15 * exp( -2112. / TEMP ) + 7.51E-16 * exp( -1521. 
/ TEMP )); rct[177*ncells32] = ARR2(2.54E-12,360.,TEMP); rct[178*ncells32] = ARR2(1.82E-13,1300.,TEMP); rct[179*ncells32] = 2e-12; rct[180*ncells32] = TROE2( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, TEMP ); rct[181*ncells32] = TROE2( 9.7e-29, -5.6e0, 9.3e-12, -1.5e0, TEMP )/(ARR2(9.0E-19,14000.,TEMP)); rct[182*ncells32] = 3.6e-12; rct[183*ncells32] = 3e-11; rct[184*ncells32] = 3e-12; rct[185*ncells32] = ARR2(5.60E-12,270.,TEMP); rct[186*ncells32] = ARR2(1.9E-13,500.,TEMP); rct[187*ncells32] = ARR2(9.6E-12,-234.,TEMP); rct[188*ncells32] = ARR2(3.04E-12,350.,TEMP)*ARR2(1.106E-31,7460.,TEMP)*2.55e19/(1+ARR2(1.106E-31,7460.,TEMP)*2.55e19); rct[189*ncells32] = 5.8e-11; rct[190*ncells32] = 2.5e-12; rct[191*ncells32] = 2.5e-12; rct[192*ncells32] = 2.5e-12; rct[193*ncells32] = 0.0; } }/* END d_Rates */ /*----------------------------------- Rates -----------------------------------*/ /* Calculates reaction rate coefficients */ /* */ /* @param[in] ncells32 A multiple of 32 grid cells */ /* @param[in] time Integration time */ /* @param[in] idx Current grid cell index */ /* @param[out] d_rct Reaction rates in device memory */ /*-----------------------------------------------------------------------------*/ void Rates(size_t const ncells32, double const time, size_t const idx, double* d_rct, double CHI) { size_t nBlocks = ((ncells32 + 127) & ~127) >> 7; size_t nThreads = 128; d_Rates<<<nBlocks, nThreads>>>(ncells32, time, idx, d_rct, CHI); }/* END Rates */ /*------------------------- END box_model_rates.h END -------------------------*/
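A minimal host-side sketch of driving Rates above. The reaction count (194) is inferred from the highest index the kernel writes, rct[193*ncells32]; the cell count, time, and CHI values are hypothetical. Note that d_Rates as shown references TEMP while its declaration and the Temperature() call are commented out, so those lines must be restored (or TEMP otherwise supplied) before the kernel compiles.

#include <cuda_runtime.h>
#include "box_model_rates.h"   // declares Rates(), as included above

void rates_example()
{
  const size_t ncells32 = 256;   // hypothetical; a multiple of 32 per the kernel documentation
  const size_t nreact   = 194;   // rct[0] .. rct[193*ncells32] are written above
  double* d_rct = nullptr;
  cudaMalloc(reinterpret_cast<void**>(&d_rct), nreact * ncells32 * sizeof(double));

  // Inside Rates(), ((ncells32 + 127) & ~127) >> 7 is ceil(ncells32 / 128):
  // 256 cells -> 2 blocks of 128 threads, one thread per grid cell.
  Rates(ncells32, /*time=*/0.0, /*idx=*/0, d_rct, /*CHI=*/0.5);

  cudaDeviceSynchronize();       // the kernel launch is asynchronous
  cudaFree(d_rct);
}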
46409be1398050c27e7d5512c50b3804f2d50901.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "cuda/ms_deform_im2col_cuda.cuh" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // #include <THH/THH.h> // #include <THH/THHAtomics.cuh> // #include <THH/THHDeviceUtils.cuh> // extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu at::Tensor ms_deform_attn_cuda_forward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step) // value: N_, S_, M_, D_ // spatial_shapes: L_, 2 // sampling_loc: N_, Lq_, M_, L_, P_, 2 { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = ::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto output = at::empty({batch, num_query, num_heads, channels}, value.options()); auto level_start_index = at::zeros({num_levels}, spatial_shapes.options()); for (int lvl = 1; lvl < num_levels; ++lvl) { auto shape_prev = spatial_shapes.select(0, lvl-1); auto size_prev = at::mul(shape_prev.select(0, 0), shape_prev.select(0, 1)); level_start_index.select(0, lvl) = at::add(level_start_index.select(0, lvl-1), size_prev); } // define alias for easy use const int batch_n = im2col_step_; auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; for (int n = 0; n < batch/im2col_step_; ++n) { auto columns = at::empty({num_levels*num_point, batch_n, num_query, num_heads, channels}, value.options()); AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { ms_deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, columns.data<scalar_t>()); })); output_n.select(0, n) = at::sum(columns, 0); } output = output.view({batch, num_query, num_heads*channels}); return output; } std::vector<at::Tensor> ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); 
AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = ::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto grad_value = at::zeros_like(value); auto grad_sampling_loc = at::zeros_like(sampling_loc); auto grad_attn_weight = at::zeros_like(attn_weight); auto level_start_index = at::zeros({num_levels}, spatial_shapes.options()); for (int lvl = 1; lvl < num_levels; ++lvl) { auto shape_prev = spatial_shapes.select(0, lvl-1); auto size_prev = at::mul(shape_prev.select(0, 0), shape_prev.select(0, 1)); level_start_index.select(0, lvl) = at::add(level_start_index.select(0, lvl-1), size_prev); } const int batch_n = im2col_step_; auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); for (int n = 0; n < batch/im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES(value.type(), "deform_conv_backward_cuda", ([&] { // gradient w.r.t. sampling location & attention weight ms_deformable_col2im_coord_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_g.data<scalar_t>(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size); // gradient w.r.t. value ms_deformable_col2im_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_g.data<scalar_t>(), spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size); })); } return { grad_value, grad_sampling_loc, grad_attn_weight }; }
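A minimal shape sketch for calling the forward pass above (it applies equally to the .hip and .cu variants of this pair). The sizes are hypothetical, the attn_weight layout (N, Lq, M, L, P) is inferred from per_attn_weight_size rather than stated in the source, and the declaration is assumed to come from the project's accompanying header.

#include <ATen/ATen.h>

// assumed declaration, normally provided by the project header
at::Tensor ms_deform_attn_cuda_forward(const at::Tensor&, const at::Tensor&,
                                       const at::Tensor&, const at::Tensor&, const int);

at::Tensor ms_deform_attn_forward_example()
{
  const int64_t N = 2, M = 8, D = 32;        // batch, heads, channels per head
  const int64_t L = 1, Lq = 100, P = 4;      // feature levels, queries, sampling points
  const int64_t H = 20, W = 20, S = H * W;   // one 20x20 level, flattened to S positions

  auto fopts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  auto iopts = at::TensorOptions().dtype(at::kLong).device(at::kCUDA);

  auto value          = at::randn({N, S, M, D}, fopts);         // value: N_, S_, M_, D_
  auto spatial_shapes = at::full({L, 2}, H, iopts);             // (H, W) per level; square, so full() suffices
  auto sampling_loc   = at::rand({N, Lq, M, L, P, 2}, fopts);   // normalized sampling locations
  auto attn_weight    = at::softmax(at::rand({N, Lq, M, L, P}, fopts), /*dim=*/-1);

  // batch % im2col_step must be 0 (asserted above); 2 % 2 == 0 here.
  return ms_deform_attn_cuda_forward(value, spatial_shapes, sampling_loc, attn_weight,
                                     /*im2col_step=*/2);
}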
46409be1398050c27e7d5512c50b3804f2d50901.cu
#include <vector> #include "cuda/ms_deform_im2col_cuda.cuh" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> // #include <THC/THC.h> // #include <THC/THCAtomics.cuh> // #include <THC/THCDeviceUtils.cuh> // extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu at::Tensor ms_deform_attn_cuda_forward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step) // value: N_, S_, M_, D_ // spatial_shapes: L_, 2 // sampling_loc: N_, Lq_, M_, L_, P_, 2 { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto output = at::empty({batch, num_query, num_heads, channels}, value.options()); auto level_start_index = at::zeros({num_levels}, spatial_shapes.options()); for (int lvl = 1; lvl < num_levels; ++lvl) { auto shape_prev = spatial_shapes.select(0, lvl-1); auto size_prev = at::mul(shape_prev.select(0, 0), shape_prev.select(0, 1)); level_start_index.select(0, lvl) = at::add(level_start_index.select(0, lvl-1), size_prev); } // define alias for easy use const int batch_n = im2col_step_; auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; for (int n = 0; n < batch/im2col_step_; ++n) { auto columns = at::empty({num_levels*num_point, batch_n, num_query, num_heads, channels}, value.options()); AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, columns.data<scalar_t>()); })); output_n.select(0, n) = at::sum(columns, 0); } output = output.view({batch, num_query, num_heads*channels}); return output; } std::vector<at::Tensor> ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, const int im2col_step) { AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); 
AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto grad_value = at::zeros_like(value); auto grad_sampling_loc = at::zeros_like(sampling_loc); auto grad_attn_weight = at::zeros_like(attn_weight); auto level_start_index = at::zeros({num_levels}, spatial_shapes.options()); for (int lvl = 1; lvl < num_levels; ++lvl) { auto shape_prev = spatial_shapes.select(0, lvl-1); auto size_prev = at::mul(shape_prev.select(0, 0), shape_prev.select(0, 1)); level_start_index.select(0, lvl) = at::add(level_start_index.select(0, lvl-1), size_prev); } const int batch_n = im2col_step_; auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); for (int n = 0; n < batch/im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES(value.type(), "deform_conv_backward_cuda", ([&] { // gradient w.r.t. sampling location & attention weight ms_deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(), grad_output_g.data<scalar_t>(), value.data<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size); // gradient w.r.t. value ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), grad_output_g.data<scalar_t>(), spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(), sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size); })); } return { grad_value, grad_sampling_loc, grad_attn_weight }; }
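// Illustrative sketch (hypothetical code, not part of either source file above): the
// pair shows hipify rewriting the ATen/CUDA headers and the current-stream lookup.
// `launchOnCurrentStream` is a placeholder name introduced only for this sketch; the
// HIP spelling is used, with the original CUDA spelling kept in trailing comments.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>   // CUDA: #include <ATen/cuda/CUDAContext.h>
#include <hip/hip_runtime.h>       // CUDA: #include <cuda.h> and <cuda_runtime.h>

void launchOnCurrentStream()
{
  // CUDA: auto stream = at::cuda::getCurrentCUDAStream();
  auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // This handle is what the im2col/col2im wrappers above receive as their first argument.
  (void)stream;
}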
da7cd6724e11d5d2e0be5ddc438a223aa96b1b5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (C) 2018 ETH Zurich // Copyright (C) 2018 UT-Battelle, LLC // All rights reserved. // // See LICENSE.txt for terms of usage. // See CITATION.txt for citation guidelines if you use this code for scientific publications. // // Author: Giovanni Balduzzi ([email protected]) // Weile Wei ([email protected]) // // Implements the GPU kernels used by the DFT algorithm. #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/kernels_interface.hpp" #include <array> #include <cassert> #include <complex> #include "dca/platform/dca_gpu.h" #include "dca/parallel/util/get_workload.hpp" #include "dca/util/integer_division.hpp" #include "dca/linalg/util/cast_gpu.hpp" #include "dca/linalg/util/atomic_add_cuda.cu.hpp" #include "dca/linalg/util/complex_operators_cuda.cu.hpp" #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/g4_helper.cuh" #include "dca/phys/four_point_type.hpp" namespace dca { namespace phys { namespace solver { namespace accumulator { namespace details { // dca::phys::solver::accumulator::details:: using namespace linalg; using linalg::util::CudaComplex; using linalg::util::castCudaComplex; using phys::FourPointType; std::array<dim3, 2> getBlockSize(const uint i, const uint j, const uint block_size = 32) { const uint n_threads_i = ::min(block_size, i); const uint n_threads_j = ::min(block_size, j); if (n_threads_i * n_threads_j > 32 * 32) throw(std::logic_error("Block size is too big")); const uint n_blocks_i = dca::util::ceilDiv(i, n_threads_i); const uint n_blocks_j = dca::util::ceilDiv(j, n_threads_j); return std::array<dim3, 2>{dim3(n_blocks_i, n_blocks_j), dim3(n_threads_i, n_threads_j)}; } template <typename Real> __global__ void computeGSinglebandKernel(CudaComplex<Real>* __restrict__ G, int ldg, const CudaComplex<Real>* __restrict__ G0, int nk, int nw_pos, const Real beta) { // Computes G = -G0(w1) * M(w1, w2) * G(w2) + (w1 == w2) * beta * G0(w1). const int n_rows = nk * nw_pos; const int n_cols = n_rows * 2; const int id_i = blockIdx.x * blockDim.x + threadIdx.x; const int id_j = blockIdx.y * blockDim.y + threadIdx.y; if (id_i >= n_rows || id_j >= n_cols) return; auto get_indices = [=](const int id, int& k, int& w) { w = id / nk; k = id - nk * w; }; int w1, w2, k1, k2; get_indices(id_i, k1, w1); get_indices(id_j, k2, w2); const CudaComplex<Real> G0_w1 = G0[k1 + nk * (w1 + nw_pos)]; const CudaComplex<Real> G0_w2 = G0[k2 + nk * w2]; G[id_i + ldg * id_j] *= -G0_w1 * G0_w2; if (k1 == k2 && w1 + nw_pos == w2) { G[id_i + ldg * id_j] += G0_w1 * beta; } } template <typename Real> void computeGSingleband(std::complex<Real>* G, int ldg, const std::complex<Real>* G0, int nk, int nw_pos, const Real beta, hipStream_t stream) { const int n_rows = nk * nw_pos; auto blocks = getBlockSize(n_rows, n_rows * 2); hipLaunchKernelGGL(( computeGSinglebandKernel), dim3(blocks[0]), dim3(blocks[1]), 0, stream, castCudaComplex(G), ldg, castCudaComplex(G0), nk, nw_pos, beta); } template <typename Real> __global__ void computeGMultibandKernel(CudaComplex<Real>* __restrict__ G, int ldg, const CudaComplex<Real>* __restrict__ G0, int ldg0, int nb, int nk, int nw_pos, Real beta) { // Computes G = -G0(w1) * M(w1, w2) * G(w2) + (w1 == w2) * beta * G0(w1). // The product is to be intended as matrix-matrix multiplication in band space. 
const int id_i = blockIdx.x * blockDim.x + threadIdx.x; const int id_j = blockIdx.y * blockDim.y + threadIdx.y; if (id_i >= nb * nk * nw_pos || id_j >= nb * nk * nw_pos * 2) return; const int no = nb * nk; auto get_indices = [=](int id, int& b, int& k, int& w) { w = id / no; id -= w * no; k = id / nb; b = id - k * nb; }; int w1, w2, k1, k2, b1, b2; get_indices(id_i, b1, k1, w1); get_indices(id_j, b2, k2, w2); w1 += nw_pos; // Note: cuda does not support templated shared memory. extern __shared__ char shared_mem[]; CudaComplex<Real>* const M_block = reinterpret_cast<CudaComplex<Real>*>(shared_mem); const int local_row_start = (threadIdx.y / nb) * nb; const int local_col_start = (threadIdx.x / nb) * nb; const int ldm = blockDim.y; CudaComplex<Real>* const M = M_block + local_row_start + ldm * local_col_start; CudaComplex<Real>& G_val = G[id_i + ldg * id_j]; M[b1 + ldm * b2] = G_val; __syncthreads(); const CudaComplex<Real>* const G0_w1 = G0 + nb * k1 + no * w1; const CudaComplex<Real>* const G0_w2 = G0 + nb * k2 + no * w2; G_val.x = G_val.y = 0; for (int j = 0; j < nb; ++j) { const CudaComplex<Real> G0_w2_val = G0_w2[j + ldg0 * b2]; for (int i = 0; i < nb; ++i) G_val -= G0_w1[b1 + ldg0 * i] * M[i + ldm * j] * G0_w2_val; } if (G0_w1 == G0_w2) G_val += G0_w1[b1 + ldg0 * b2] * beta; } template <typename Real> void computeGMultiband(std::complex<Real>* G, int ldg, const std::complex<Real>* G0, int ldg0, int nb, int nk, int nw_pos, Real beta, hipStream_t stream) { const int n_rows = nb * nk * nw_pos; auto get_block_width = [nb] { if (nb > 16) throw(std::out_of_range("Too many bands.")); for (int candidate = 16; candidate > 0; --candidate) if (!(candidate % nb)) return candidate; return -1; }; const int width = get_block_width(); const auto blocks = getBlockSize(n_rows, n_rows * 2, width); hipLaunchKernelGGL(( computeGMultibandKernel), dim3(blocks[0]), dim3(blocks[1]), width * width * sizeof(std::complex<Real>), stream, castCudaComplex(G), ldg, castCudaComplex(G0), ldg0, nb, nk, nw_pos, beta); } template <typename Real, FourPointType type> __global__ void updateG4Kernel(CudaComplex<Real>* __restrict__ G4, const CudaComplex<Real>* __restrict__ G_up, const int ldgu, const CudaComplex<Real>* __restrict__ G_down, const int ldgd, const int sign, const bool atomic, const uint64_t start, const uint64_t end) { // TODO: reduce code duplication. // TODO: decrease, if possible, register pressure. E.g. a single thread computes all bands. const uint64_t local_g4_index = static_cast<uint64_t>(blockIdx.x) * static_cast<uint64_t>(blockDim.x) + static_cast<uint64_t>(threadIdx.x); const uint64_t g4_index = local_g4_index + start; if (g4_index >= end) { // out of domain. return; } unsigned b1, b2, b3, b4, k1, k2, k_ex, w1, w2, w_ex; g4_helper.unrollIndex(g4_index, b1, b2, b3, b4, k1, w1, k2, w2, k_ex, w_ex); const unsigned nb = g4_helper.get_bands(); const unsigned nk = g4_helper.get_cluster_size(); CudaComplex<Real> contribution; const unsigned no = nk * nb; auto cond_conj = [](const CudaComplex<Real> a, const bool cond) { return cond ? conj(a) : a; }; // Compute the contribution to G4. In all the products of Green's function of type Ga * Gb, // the dependency on the bands is implied as Ga(b1, b2) * Gb(b2, b3). Sums and differences with // the exchange momentum, implies the same operation is performed with the exchange frequency. // See tp_accumulator.hpp for more details. 
switch (type) { case FourPointType::PARTICLE_HOLE_TRANSVERSE: { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, -s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); contribution = -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; // The PARTICLE_HOLE_MAGNETIC contribution is computed in two parts: case FourPointType::PARTICLE_HOLE_MAGNETIC: { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); contribution = -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } // Spin Difference Contribution // new scope to reuse local index variables { // contribution += (\sum_s s * G(k1, k1 + k_ex)) * (\sum_s s * G(k2 + k_ex, k2)) int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga = cond_conj(G_up[i_a + ldgu * j_a] - G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb = cond_conj(G_up[i_b + ldgu * j_b] - G_down[i_b + ldgd * j_b], conj_b); contribution += (Ga * Gb); } break; // The PARTICLE_HOLE_CHARGE contribution is computed in two parts: case FourPointType::PARTICLE_HOLE_CHARGE: { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int 
w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); contribution = -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } // Spin Difference Contribution // new scope to reuse local index variables { // contribution += (\sum_s G(k1, k1 + k_ex, s)) * (\sum_s G(k2 + k_ex, k2, s)) // TODO: pull into function, index setting code is identical for Spin cases int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga = cond_conj(G_up[i_a + ldgu * j_a] + G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb = cond_conj(G_up[i_b + ldgu * j_b] + G_down[i_b + ldgd * j_b], conj_b); contribution += (Ga * Gb); } break; // The PARTICLE_HOLE_LONGITUDINAL_UP_UP contribution is computed in two parts: case FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP: { // contribution <- \sum_s G(k1, k1+k_ex, s) * G(k2+k_ex, k2, s) int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgu * j_b], conj_b); contribution = (Ga_1 * Gb_1 + Ga_2 * Gb_2); } { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgu * j_b], conj_b); contribution += -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; case 
FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN: { // contribution <- \sum_s G(k1, k1+k_ex, s) * G(k2+k_ex, k2, -s) int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); contribution = (Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; case FourPointType::PARTICLE_PARTICLE_UP_DOWN: { // contribution <- -\sum_s G(k_ex - k2, k_ex - k1, s) * G(k2, k1, -s). int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.wexMinus(w1, w_ex)); int w2_b(g4_helper.wexMinus(w2, w_ex)); int k1_b = g4_helper.kexMinus(k1, k_ex); int k2_b = g4_helper.kexMinus(k2, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); contribution = (Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; default: // abort asm("trap;"); } CudaComplex<Real>* const result_ptr = G4 + local_g4_index; if (atomic) dca::linalg::atomicAdd(result_ptr, contribution * 0.5f * static_cast<Real>(sign)); else *result_ptr += contribution * 0.5f * static_cast<Real>(sign); } template <typename Real, FourPointType type> float updateG4(std::complex<Real>* G4, const std::complex<Real>* G_up, const int ldgu, const std::complex<Real>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end) { constexpr const std::size_t n_threads = 256; const unsigned n_blocks = dca::util::ceilDiv(end - start, n_threads); hipLaunchKernelGGL(( updateG4Kernel<Real, type>) , dim3(n_blocks), dim3(n_threads), 0, stream, castCudaComplex(G4), castCudaComplex(G_up), ldgu, castCudaComplex(G_down), ldgd, sign, atomic, start, end); // Check for errors. auto err = hipPeekAtLastError(); if (err != hipSuccess) { linalg::util::printErrorMessage(err, __FUNCTION__, __FILE__, __LINE__); throw(std::runtime_error("CUDA failed to launch the G4 kernel.")); } const std::size_t n_updates = end - start; switch (type) { // Note: sign flips are ignored and a single complex * real multiplication is // present in all modes. case FourPointType::PARTICLE_HOLE_TRANSVERSE: // Each update of a G4 entry involves 2 complex additions and 2 complex multiplications. return 18. * n_updates; case FourPointType::PARTICLE_HOLE_MAGNETIC: // Each update of a G4 entry involves 3 complex additions and 3 complex multiplications. return 26. 
* n_updates; case FourPointType::PARTICLE_HOLE_CHARGE: // Each update of a G4 entry involves 3 complex additions and 3 complex multiplications. return 26. * n_updates; case FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP: // Each update of a G4 entry involves 3 complex additions and 4 complex multiplications. return 32 * n_updates; case FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN: // Each update of a G4 entry involves 2 complex additions and 2 complex multiplications. return 18. * n_updates; case FourPointType::PARTICLE_PARTICLE_UP_DOWN: // Each update of a G4 entry involves 2 complex additions and 2 complex multiplications. return 18. * n_updates; default: throw(std::logic_error("Invalid mode")); } } // Explicit instantiation. template void computeGSingleband<float>(std::complex<float>* G, int ldg, const std::complex<float>* G0, int nk, int nw, const float beta, hipStream_t stream); template void computeGMultiband<float>(std::complex<float>* G, int ldg, const std::complex<float>* G0, int ldg0, int nb, int nk, int nw, float beta, hipStream_t stream); template void computeGSingleband<double>(std::complex<double>* G, int ldg, const std::complex<double>* G0, int nk, int nw_pos, const double beta, hipStream_t stream); template void computeGMultiband<double>(std::complex<double>* G, int ldg, const std::complex<double>* G0, int ldg0, int nb, int nk, int nw_pos, double beta, hipStream_t stream); template float updateG4<float, FourPointType::PARTICLE_HOLE_TRANSVERSE>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_MAGNETIC>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_CHARGE>(std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_PARTICLE_UP_DOWN>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_TRANSVERSE>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_MAGNETIC>( std::complex<double>* G4, const std::complex<double>* G_up, 
const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_CHARGE>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_PARTICLE_UP_DOWN>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, hipStream_t stream, std::size_t start, std::size_t end); } // namespace details } // namespace accumulator } // namespace solver } // namespace phys } // namespace dca
da7cd6724e11d5d2e0be5ddc438a223aa96b1b5e.cu
// Copyright (C) 2018 ETH Zurich // Copyright (C) 2018 UT-Battelle, LLC // All rights reserved. // // See LICENSE.txt for terms of usage. // See CITATION.txt for citation guidelines if you use this code for scientific publications. // // Author: Giovanni Balduzzi ([email protected]) // Weile Wei ([email protected]) // // Implements the GPU kernels used by the DFT algorithm. #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/kernels_interface.hpp" #include <array> #include <cassert> #include <complex> #include "dca/platform/dca_gpu.h" #include "dca/parallel/util/get_workload.hpp" #include "dca/util/integer_division.hpp" #include "dca/linalg/util/cast_gpu.hpp" #include "dca/linalg/util/atomic_add_cuda.cu.hpp" #include "dca/linalg/util/complex_operators_cuda.cu.hpp" #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/g4_helper.cuh" #include "dca/phys/four_point_type.hpp" namespace dca { namespace phys { namespace solver { namespace accumulator { namespace details { // dca::phys::solver::accumulator::details:: using namespace linalg; using linalg::util::CudaComplex; using linalg::util::castCudaComplex; using phys::FourPointType; std::array<dim3, 2> getBlockSize(const uint i, const uint j, const uint block_size = 32) { const uint n_threads_i = std::min(block_size, i); const uint n_threads_j = std::min(block_size, j); if (n_threads_i * n_threads_j > 32 * 32) throw(std::logic_error("Block size is too big")); const uint n_blocks_i = dca::util::ceilDiv(i, n_threads_i); const uint n_blocks_j = dca::util::ceilDiv(j, n_threads_j); return std::array<dim3, 2>{dim3(n_blocks_i, n_blocks_j), dim3(n_threads_i, n_threads_j)}; } template <typename Real> __global__ void computeGSinglebandKernel(CudaComplex<Real>* __restrict__ G, int ldg, const CudaComplex<Real>* __restrict__ G0, int nk, int nw_pos, const Real beta) { // Computes G = -G0(w1) * M(w1, w2) * G(w2) + (w1 == w2) * beta * G0(w1). const int n_rows = nk * nw_pos; const int n_cols = n_rows * 2; const int id_i = blockIdx.x * blockDim.x + threadIdx.x; const int id_j = blockIdx.y * blockDim.y + threadIdx.y; if (id_i >= n_rows || id_j >= n_cols) return; auto get_indices = [=](const int id, int& k, int& w) { w = id / nk; k = id - nk * w; }; int w1, w2, k1, k2; get_indices(id_i, k1, w1); get_indices(id_j, k2, w2); const CudaComplex<Real> G0_w1 = G0[k1 + nk * (w1 + nw_pos)]; const CudaComplex<Real> G0_w2 = G0[k2 + nk * w2]; G[id_i + ldg * id_j] *= -G0_w1 * G0_w2; if (k1 == k2 && w1 + nw_pos == w2) { G[id_i + ldg * id_j] += G0_w1 * beta; } } template <typename Real> void computeGSingleband(std::complex<Real>* G, int ldg, const std::complex<Real>* G0, int nk, int nw_pos, const Real beta, cudaStream_t stream) { const int n_rows = nk * nw_pos; auto blocks = getBlockSize(n_rows, n_rows * 2); computeGSinglebandKernel<<<blocks[0], blocks[1], 0, stream>>>( castCudaComplex(G), ldg, castCudaComplex(G0), nk, nw_pos, beta); } template <typename Real> __global__ void computeGMultibandKernel(CudaComplex<Real>* __restrict__ G, int ldg, const CudaComplex<Real>* __restrict__ G0, int ldg0, int nb, int nk, int nw_pos, Real beta) { // Computes G = -G0(w1) * M(w1, w2) * G(w2) + (w1 == w2) * beta * G0(w1). // The product is to be intended as matrix-matrix multiplication in band space. 
const int id_i = blockIdx.x * blockDim.x + threadIdx.x; const int id_j = blockIdx.y * blockDim.y + threadIdx.y; if (id_i >= nb * nk * nw_pos || id_j >= nb * nk * nw_pos * 2) return; const int no = nb * nk; auto get_indices = [=](int id, int& b, int& k, int& w) { w = id / no; id -= w * no; k = id / nb; b = id - k * nb; }; int w1, w2, k1, k2, b1, b2; get_indices(id_i, b1, k1, w1); get_indices(id_j, b2, k2, w2); w1 += nw_pos; // Note: cuda does not support templated shared memory. extern __shared__ char shared_mem[]; CudaComplex<Real>* const M_block = reinterpret_cast<CudaComplex<Real>*>(shared_mem); const int local_row_start = (threadIdx.y / nb) * nb; const int local_col_start = (threadIdx.x / nb) * nb; const int ldm = blockDim.y; CudaComplex<Real>* const M = M_block + local_row_start + ldm * local_col_start; CudaComplex<Real>& G_val = G[id_i + ldg * id_j]; M[b1 + ldm * b2] = G_val; __syncthreads(); const CudaComplex<Real>* const G0_w1 = G0 + nb * k1 + no * w1; const CudaComplex<Real>* const G0_w2 = G0 + nb * k2 + no * w2; G_val.x = G_val.y = 0; for (int j = 0; j < nb; ++j) { const CudaComplex<Real> G0_w2_val = G0_w2[j + ldg0 * b2]; for (int i = 0; i < nb; ++i) G_val -= G0_w1[b1 + ldg0 * i] * M[i + ldm * j] * G0_w2_val; } if (G0_w1 == G0_w2) G_val += G0_w1[b1 + ldg0 * b2] * beta; } template <typename Real> void computeGMultiband(std::complex<Real>* G, int ldg, const std::complex<Real>* G0, int ldg0, int nb, int nk, int nw_pos, Real beta, cudaStream_t stream) { const int n_rows = nb * nk * nw_pos; auto get_block_width = [nb] { if (nb > 16) throw(std::out_of_range("Too many bands.")); for (int candidate = 16; candidate > 0; --candidate) if (!(candidate % nb)) return candidate; return -1; }; const int width = get_block_width(); const auto blocks = getBlockSize(n_rows, n_rows * 2, width); computeGMultibandKernel<<<blocks[0], blocks[1], width * width * sizeof(std::complex<Real>), stream>>>( castCudaComplex(G), ldg, castCudaComplex(G0), ldg0, nb, nk, nw_pos, beta); } template <typename Real, FourPointType type> __global__ void updateG4Kernel(CudaComplex<Real>* __restrict__ G4, const CudaComplex<Real>* __restrict__ G_up, const int ldgu, const CudaComplex<Real>* __restrict__ G_down, const int ldgd, const int sign, const bool atomic, const uint64_t start, const uint64_t end) { // TODO: reduce code duplication. // TODO: decrease, if possible, register pressure. E.g. a single thread computes all bands. const uint64_t local_g4_index = static_cast<uint64_t>(blockIdx.x) * static_cast<uint64_t>(blockDim.x) + static_cast<uint64_t>(threadIdx.x); const uint64_t g4_index = local_g4_index + start; if (g4_index >= end) { // out of domain. return; } unsigned b1, b2, b3, b4, k1, k2, k_ex, w1, w2, w_ex; g4_helper.unrollIndex(g4_index, b1, b2, b3, b4, k1, w1, k2, w2, k_ex, w_ex); const unsigned nb = g4_helper.get_bands(); const unsigned nk = g4_helper.get_cluster_size(); CudaComplex<Real> contribution; const unsigned no = nk * nb; auto cond_conj = [](const CudaComplex<Real> a, const bool cond) { return cond ? conj(a) : a; }; // Compute the contribution to G4. In all the products of Green's function of type Ga * Gb, // the dependency on the bands is implied as Ga(b1, b2) * Gb(b2, b3). Sums and differences with // the exchange momentum, implies the same operation is performed with the exchange frequency. // See tp_accumulator.hpp for more details. 
switch (type) { case FourPointType::PARTICLE_HOLE_TRANSVERSE: { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, -s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); contribution = -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; // The PARTICLE_HOLE_MAGNETIC contribution is computed in two parts: case FourPointType::PARTICLE_HOLE_MAGNETIC: { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); contribution = -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } // Spin Difference Contribution // new scope to reuse local index variables { // contribution += (\sum_s s * G(k1, k1 + k_ex)) * (\sum_s s * G(k2 + k_ex, k2)) int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga = cond_conj(G_up[i_a + ldgu * j_a] - G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb = cond_conj(G_up[i_b + ldgu * j_b] - G_down[i_b + ldgd * j_b], conj_b); contribution += (Ga * Gb); } break; // The PARTICLE_HOLE_CHARGE contribution is computed in two parts: case FourPointType::PARTICLE_HOLE_CHARGE: { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int 
w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); contribution = -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } // Spin Difference Contribution // new scope to reuse local index variables { // contribution += (\sum_s G(k1, k1 + k_ex, s)) * (\sum_s G(k2 + k_ex, k2, s)) // TODO: pull into function, index setting code is identical for Spin cases int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga = cond_conj(G_up[i_a + ldgu * j_a] + G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb = cond_conj(G_up[i_b + ldgu * j_b] + G_down[i_b + ldgd * j_b], conj_b); contribution += (Ga * Gb); } break; // The PARTICLE_HOLE_LONGITUDINAL_UP_UP contribution is computed in two parts: case FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP: { // contribution <- \sum_s G(k1, k1+k_ex, s) * G(k2+k_ex, k2, s) int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgu * j_b], conj_b); contribution = (Ga_1 * Gb_1 + Ga_2 * Gb_2); } { // contribution <- -\sum_s G(k1, k2, s) * G(k2 + k_ex, k1 + k_ex, s) int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b4 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(g4_helper.addWex(w1, w_ex)); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = g4_helper.addKex(k1, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b3 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_up[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_down[i_b + ldgu * j_b], conj_b); contribution += -(Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; case 
FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN: { // contribution <- \sum_s G(k1, k1+k_ex, s) * G(k2+k_ex, k2, -s) int w1_a(w1); int w2_a(g4_helper.addWex(w1, w_ex)); int k1_a = k1; int k2_a = g4_helper.addKex(k1, k_ex); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.addWex(w2, w_ex)); int w2_b(w2); int k1_b = g4_helper.addKex(k2, k_ex); int k2_b = k2; const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); contribution = (Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; case FourPointType::PARTICLE_PARTICLE_UP_DOWN: { // contribution <- -\sum_s G(k_ex - k2, k_ex - k1, s) * G(k2, k1, -s). int w1_a(w1); int w2_a(w2); int k1_a(k1); int k2_a(k2); const bool conj_a = g4_helper.extendGIndices(k1_a, k2_a, w1_a, w2_a); const int i_a = b1 + nb * k1_a + no * w1_a; const int j_a = b3 + nb * k2_a + no * w2_a; const CudaComplex<Real> Ga_1 = cond_conj(G_up[i_a + ldgu * j_a], conj_a); const CudaComplex<Real> Ga_2 = cond_conj(G_down[i_a + ldgd * j_a], conj_a); int w1_b(g4_helper.wexMinus(w1, w_ex)); int w2_b(g4_helper.wexMinus(w2, w_ex)); int k1_b = g4_helper.kexMinus(k1, k_ex); int k2_b = g4_helper.kexMinus(k2, k_ex); const bool conj_b = g4_helper.extendGIndices(k1_b, k2_b, w1_b, w2_b); const int i_b = b2 + nb * k1_b + no * w1_b; const int j_b = b4 + nb * k2_b + no * w2_b; const CudaComplex<Real> Gb_1 = cond_conj(G_down[i_b + ldgd * j_b], conj_b); const CudaComplex<Real> Gb_2 = cond_conj(G_up[i_b + ldgu * j_b], conj_b); contribution = (Ga_1 * Gb_1 + Ga_2 * Gb_2); } break; default: // abort asm("trap;"); } CudaComplex<Real>* const result_ptr = G4 + local_g4_index; if (atomic) dca::linalg::atomicAdd(result_ptr, contribution * 0.5f * static_cast<Real>(sign)); else *result_ptr += contribution * 0.5f * static_cast<Real>(sign); } template <typename Real, FourPointType type> float updateG4(std::complex<Real>* G4, const std::complex<Real>* G_up, const int ldgu, const std::complex<Real>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end) { constexpr const std::size_t n_threads = 256; const unsigned n_blocks = dca::util::ceilDiv(end - start, n_threads); updateG4Kernel<Real, type> <<<n_blocks, n_threads, 0, stream>>>(castCudaComplex(G4), castCudaComplex(G_up), ldgu, castCudaComplex(G_down), ldgd, sign, atomic, start, end); // Check for errors. auto err = cudaPeekAtLastError(); if (err != cudaSuccess) { linalg::util::printErrorMessage(err, __FUNCTION__, __FILE__, __LINE__); throw(std::runtime_error("CUDA failed to launch the G4 kernel.")); } const std::size_t n_updates = end - start; switch (type) { // Note: sign flips are ignored and a single complex * real multiplication is // present in all modes. case FourPointType::PARTICLE_HOLE_TRANSVERSE: // Each update of a G4 entry involves 2 complex additions and 2 complex multiplications. return 18. * n_updates; case FourPointType::PARTICLE_HOLE_MAGNETIC: // Each update of a G4 entry involves 3 complex additions and 3 complex multiplications. return 26. 
* n_updates; case FourPointType::PARTICLE_HOLE_CHARGE: // Each update of a G4 entry involves 3 complex additions and 3 complex multiplications. return 26. * n_updates; case FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP: // Each update of a G4 entry involves 3 complex additions and 4 complex multiplications. return 32 * n_updates; case FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN: // Each update of a G4 entry involves 2 complex additions and 2 complex multiplications. return 18. * n_updates; case FourPointType::PARTICLE_PARTICLE_UP_DOWN: // Each update of a G4 entry involves 2 complex additions and 2 complex multiplications. return 18. * n_updates; default: throw(std::logic_error("Invalid mode")); } } // Explicit instantiation. template void computeGSingleband<float>(std::complex<float>* G, int ldg, const std::complex<float>* G0, int nk, int nw, const float beta, cudaStream_t stream); template void computeGMultiband<float>(std::complex<float>* G, int ldg, const std::complex<float>* G0, int ldg0, int nb, int nk, int nw, float beta, cudaStream_t stream); template void computeGSingleband<double>(std::complex<double>* G, int ldg, const std::complex<double>* G0, int nk, int nw_pos, const double beta, cudaStream_t stream); template void computeGMultiband<double>(std::complex<double>* G, int ldg, const std::complex<double>* G0, int ldg0, int nb, int nk, int nw_pos, double beta, cudaStream_t stream); template float updateG4<float, FourPointType::PARTICLE_HOLE_TRANSVERSE>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_MAGNETIC>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_CHARGE>(std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<float, FourPointType::PARTICLE_PARTICLE_UP_DOWN>( std::complex<float>* G4, const std::complex<float>* G_up, const int ldgu, const std::complex<float>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_TRANSVERSE>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_MAGNETIC>( std::complex<double>* G4, const 
std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_CHARGE>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_UP>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_HOLE_LONGITUDINAL_UP_DOWN>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); template float updateG4<double, FourPointType::PARTICLE_PARTICLE_UP_DOWN>( std::complex<double>* G4, const std::complex<double>* G_up, const int ldgu, const std::complex<double>* G_down, const int ldgd, const int sign, bool atomic, cudaStream_t stream, std::size_t start, std::size_t end); } // namespace details } // namespace accumulator } // namespace solver } // namespace phys } // namespace dca
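// Illustrative sketch (hypothetical code, not part of either DCA++ file above): the pair
// shows a triple-chevron launch with dynamic shared memory and a stream being rewritten
// to hipLaunchKernelGGL, and cudaPeekAtLastError / cudaSuccess becoming their hip* forms.
// `tileKernel`, `launchTileKernel` and `width` are placeholder names for this sketch only.
#include <hip/hip_runtime.h>
#include <stdexcept>

__global__ void tileKernel(float* out)
{
  extern __shared__ float tile[];          // dynamically sized shared memory
  tile[threadIdx.x] = (float)threadIdx.x;
  __syncthreads();
  out[blockIdx.x * blockDim.x + threadIdx.x] = tile[0];
}

void launchTileKernel(float* d_out, int width, hipStream_t stream)  // CUDA: cudaStream_t stream
{
  dim3 blocks(8), threads(width);
  // CUDA: tileKernel<<<blocks, threads, width * sizeof(float), stream>>>(d_out);
  hipLaunchKernelGGL(tileKernel, blocks, threads, width * sizeof(float), stream, d_out);
  auto err = hipPeekAtLastError();                      // CUDA: cudaPeekAtLastError()
  if (err != hipSuccess)                                // CUDA: cudaSuccess
    throw std::runtime_error(hipGetErrorString(err));   // CUDA: cudaGetErrorString(err)
}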
4864b09f0597713a6c8dff5ade10af372cf2368a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> //#include <cstdio> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <iostream> //named space for opencv and cout using namespace std; using namespace cv; //kernel function for gpu /* * components for the kernel function: * inout image data * output image data * image height * image width * transfer matrix in x direction * transfer matrix in y direction */ __global__ void sobelGpu(unsigned char *input, unsigned char *output, int imgH, int imgW, int *d_sobel_x, int *d_sobel_y) { //computing with multiple threads int xIndex = threadIdx.x + blockIdx.x * blockDim.x; int yIndex = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIndex + yIndex * imgW; //gradient in x and y direction int Gx = 0; int Gy = 0; while(offset < (imgH - 2) * (imgW - 2)) { //gradient in x direction Gx = d_sobel_x[0] * input[(yIndex) * imgW + xIndex] + d_sobel_x[1] * input[(yIndex +1 ) * imgW + xIndex] + d_sobel_x[2] * input[(yIndex + 2) * imgW + xIndex] + d_sobel_x[3] * input[(yIndex) * imgW + xIndex + 1] + d_sobel_x[4] * input[(yIndex+1) * imgW + xIndex + 1] + d_sobel_x[5] * input[(yIndex + 2) * imgW + xIndex + 1] + d_sobel_x[6] * input[(yIndex) * imgW + xIndex + 2] + d_sobel_x[7] * input[(yIndex+1) * imgW + xIndex + 2] + d_sobel_x[8] * input[(yIndex + 2) * imgW + xIndex + 2]; //gradient in y direction Gy = d_sobel_y[0] * input[(yIndex) * imgW + xIndex] + d_sobel_y[1] * input[(yIndex +1 ) * imgW + xIndex] + d_sobel_y[2] * input[(yIndex + 2) * imgW + xIndex] + d_sobel_y[3] * input[(yIndex) * imgW + xIndex + 1] + d_sobel_y[4] * input[(yIndex+1) * imgW + xIndex + 1] + d_sobel_y[5] * input[(yIndex + 2) * imgW + xIndex + 1] + d_sobel_y[6] * input[(yIndex) * imgW + xIndex + 2] + d_sobel_y[7] * input[(yIndex+1) * imgW + xIndex + 2] + d_sobel_y[8] * input[(yIndex + 2) * imgW + xIndex + 2]; int sum = abs(Gx) + abs(Gy); // constrain the sum with 255 if (sum > 255) { sum = 255; } output[offset] = sum; xIndex += blockDim.x * gridDim.x; if(xIndex > imgW - 2) { yIndex += blockDim.y * gridDim.y; xIndex = threadIdx.x + blockIdx.x * blockDim.x; } offset = xIndex + yIndex * imgW; } } // the main function int main() { //read the input image, and transfer it in grayscal Mat gray_img = imread("test01.jpg", 0); // save the gray image /* save the gray image if needed */ //imwrite("Gray_Image.jpg", gray_img); //transfer matrix int sobel_x[3][3]; int sobel_y[3][3]; //image size, height and width int imgH = gray_img.rows; int imgW = gray_img.cols; //initialze the image after gauss filter Mat gaussImg; //implementation of the gauss filter with a 3 X 3 kernel GaussianBlur(gray_img, gaussImg, Size(3, 3), 0, 0, BORDER_DEFAULT); // save the gauss image /* save the image after gauss filter if needed */ //imwrite("gauss.jpg", gaussImg); // assign values to the x direction sobel_x[0][0] = -1; sobel_x[0][1] = 0; sobel_x[0][2] =1; sobel_x[1][0] = -2; sobel_x[1][1] = 0; sobel_x[1][2] =2; sobel_x[2][0] = -1; sobel_x[2][1] = 0; sobel_x[2][2] =1; // asign values to the y direction sobel_y[0][0] = -1; sobel_y[0][1] = -2; sobel_y[0][2] = -1; sobel_y[1][0] = 0; sobel_y[1][1] = 0; sobel_y[1][2] = 0; sobel_y[2][0] = 1; sobel_y[2][1] = 2; sobel_y[2][2] = 1; //the image for data after processed by GPU Mat out_img(imgH, imgW, CV_8UC1, Scalar(0)); /* implemetation for GPU kernel */ //device variables for transfer 
matrixes int *d_sobel_x; int *d_sobel_y; //device memory unsigned char *d_in; unsigned char *d_out; //recording the time hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); //start recording hipEventRecord( start, 0 ); //memory allocate hipMalloc((void**)&d_in, imgH * imgW * sizeof(unsigned char)); hipMalloc((void**)&d_out, imgH * imgW * sizeof(unsigned char)); hipMalloc((void**)&d_sobel_x, 9 * sizeof(int)); hipMalloc((void**)&d_sobel_y, 9 * sizeof(int)); //pass the image data into the GPU hipMemcpy(d_in, gaussImg.data, imgH * imgW * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy((void*)d_sobel_x, (void*)sobel_x, 3 *3* sizeof(int), hipMemcpyHostToDevice); hipMemcpy((void*)d_sobel_y, (void*)sobel_y, 3 *3* sizeof(int), hipMemcpyHostToDevice); //dim3 threadsPerBlock(32, 32); //dim3 blocksPerGrid((imgW + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgH + threadsPerBlock.y - 1) / threadsPerBlock.y); //define the dimentions dim3 blocks((int)((imgW+31)/32), (int)(imgH+31)/32); dim3 threads(16, 16); //call the kernel function hipLaunchKernelGGL(( sobelGpu) , dim3(blocks),dim3(threads), 0, 0, d_in, d_out, imgH, imgW, d_sobel_x, d_sobel_y); //sobelInCuda3 <<< 1,1 >>> (d_in, d_out, imgH, imgW); //pass the output image data back to host hipMemcpy(out_img.data, d_out, imgH * imgW * sizeof(unsigned char), hipMemcpyDeviceToHost); //stop recording time hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); //free memory hipFree(d_in); hipFree(d_out); //compute the time for execution float elapsedTime; hipEventElapsedTime( &elapsedTime, start, stop ); cout << "Time for execution with organized threads and block dimention is: " << static_cast<double>(elapsedTime) << " ms." <<endl; //printf( "The time for execution with ognized threads and block dimentions: %.6f ms \n", elapsedTime); hipEventDestroy( start ); hipEventDestroy( stop ); //save the output image imwrite("gpu2.jpg", out_img); return 0; }
4864b09f0597713a6c8dff5ade10af372cf2368a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <device_functions.h> //#include <cstdio> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <iostream> //named space for opencv and cout using namespace std; using namespace cv; //kernel function for gpu /* * components for the kernel function: * inout image data * output image data * image height * image width * transfer matrix in x direction * transfer matrix in y direction */ __global__ void sobelGpu(unsigned char *input, unsigned char *output, int imgH, int imgW, int *d_sobel_x, int *d_sobel_y) { //computing with multiple threads int xIndex = threadIdx.x + blockIdx.x * blockDim.x; int yIndex = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIndex + yIndex * imgW; //gradient in x and y direction int Gx = 0; int Gy = 0; while(offset < (imgH - 2) * (imgW - 2)) { //gradient in x direction Gx = d_sobel_x[0] * input[(yIndex) * imgW + xIndex] + d_sobel_x[1] * input[(yIndex +1 ) * imgW + xIndex] + d_sobel_x[2] * input[(yIndex + 2) * imgW + xIndex] + d_sobel_x[3] * input[(yIndex) * imgW + xIndex + 1] + d_sobel_x[4] * input[(yIndex+1) * imgW + xIndex + 1] + d_sobel_x[5] * input[(yIndex + 2) * imgW + xIndex + 1] + d_sobel_x[6] * input[(yIndex) * imgW + xIndex + 2] + d_sobel_x[7] * input[(yIndex+1) * imgW + xIndex + 2] + d_sobel_x[8] * input[(yIndex + 2) * imgW + xIndex + 2]; //gradient in y direction Gy = d_sobel_y[0] * input[(yIndex) * imgW + xIndex] + d_sobel_y[1] * input[(yIndex +1 ) * imgW + xIndex] + d_sobel_y[2] * input[(yIndex + 2) * imgW + xIndex] + d_sobel_y[3] * input[(yIndex) * imgW + xIndex + 1] + d_sobel_y[4] * input[(yIndex+1) * imgW + xIndex + 1] + d_sobel_y[5] * input[(yIndex + 2) * imgW + xIndex + 1] + d_sobel_y[6] * input[(yIndex) * imgW + xIndex + 2] + d_sobel_y[7] * input[(yIndex+1) * imgW + xIndex + 2] + d_sobel_y[8] * input[(yIndex + 2) * imgW + xIndex + 2]; int sum = abs(Gx) + abs(Gy); // constrain the sum with 255 if (sum > 255) { sum = 255; } output[offset] = sum; xIndex += blockDim.x * gridDim.x; if(xIndex > imgW - 2) { yIndex += blockDim.y * gridDim.y; xIndex = threadIdx.x + blockIdx.x * blockDim.x; } offset = xIndex + yIndex * imgW; } } // the main function int main() { //read the input image, and transfer it in grayscal Mat gray_img = imread("test01.jpg", 0); // save the gray image /* save the gray image if needed */ //imwrite("Gray_Image.jpg", gray_img); //transfer matrix int sobel_x[3][3]; int sobel_y[3][3]; //image size, height and width int imgH = gray_img.rows; int imgW = gray_img.cols; //initialze the image after gauss filter Mat gaussImg; //implementation of the gauss filter with a 3 X 3 kernel GaussianBlur(gray_img, gaussImg, Size(3, 3), 0, 0, BORDER_DEFAULT); // save the gauss image /* save the image after gauss filter if needed */ //imwrite("gauss.jpg", gaussImg); // assign values to the x direction sobel_x[0][0] = -1; sobel_x[0][1] = 0; sobel_x[0][2] =1; sobel_x[1][0] = -2; sobel_x[1][1] = 0; sobel_x[1][2] =2; sobel_x[2][0] = -1; sobel_x[2][1] = 0; sobel_x[2][2] =1; // asign values to the y direction sobel_y[0][0] = -1; sobel_y[0][1] = -2; sobel_y[0][2] = -1; sobel_y[1][0] = 0; sobel_y[1][1] = 0; sobel_y[1][2] = 0; sobel_y[2][0] = 1; sobel_y[2][1] = 2; sobel_y[2][2] = 1; //the image for data after processed by GPU Mat out_img(imgH, imgW, CV_8UC1, Scalar(0)); /* implemetation for GPU kernel */ //device variables for transfer matrixes int *d_sobel_x; int *d_sobel_y; //device memory unsigned char *d_in; 
unsigned char *d_out; //recording the time cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); //start recording cudaEventRecord( start, 0 ); //memory allocate cudaMalloc((void**)&d_in, imgH * imgW * sizeof(unsigned char)); cudaMalloc((void**)&d_out, imgH * imgW * sizeof(unsigned char)); cudaMalloc((void**)&d_sobel_x, 9 * sizeof(int)); cudaMalloc((void**)&d_sobel_y, 9 * sizeof(int)); //pass the image data into the GPU cudaMemcpy(d_in, gaussImg.data, imgH * imgW * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy((void*)d_sobel_x, (void*)sobel_x, 3 *3* sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy((void*)d_sobel_y, (void*)sobel_y, 3 *3* sizeof(int), cudaMemcpyHostToDevice); //dim3 threadsPerBlock(32, 32); //dim3 blocksPerGrid((imgW + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgH + threadsPerBlock.y - 1) / threadsPerBlock.y); //define the dimentions dim3 blocks((int)((imgW+31)/32), (int)(imgH+31)/32); dim3 threads(16, 16); //call the kernel function sobelGpu <<<blocks,threads>>> (d_in, d_out, imgH, imgW, d_sobel_x, d_sobel_y); //sobelInCuda3 <<< 1,1 >>> (d_in, d_out, imgH, imgW); //pass the output image data back to host cudaMemcpy(out_img.data, d_out, imgH * imgW * sizeof(unsigned char), cudaMemcpyDeviceToHost); //stop recording time cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); //free memory cudaFree(d_in); cudaFree(d_out); //compute the time for execution float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, stop ); cout << "Time for execution with organized threads and block dimention is: " << static_cast<double>(elapsedTime) << " ms." <<endl; //printf( "The time for execution with ognized threads and block dimentions: %.6f ms \n", elapsedTime); cudaEventDestroy( start ); cudaEventDestroy( stop ); //save the output image imwrite("gpu2.jpg", out_img); return 0; }
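A note on the Sobel kernel in the file above: it walks the image with a hand-rolled stride loop that resets xIndex and recomputes offset each iteration. A more common layout is one thread per output pixel with an explicit bounds check. The sketch below is illustrative only; the kernel name, coefficient tables and border handling are assumptions, not taken from the file above.

// Minimal sketch, assuming a grayscale image of size imgW x imgH and a
// 3x3 Sobel window; not part of the original file above.
__global__ void sobelBoundsChecked(const unsigned char *input, unsigned char *output,
                                   int imgH, int imgW)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= imgW - 2 || y >= imgH - 2) return;          // keep the 3x3 window in bounds

    const int sx[9] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };  // horizontal gradient taps
    const int sy[9] = { -1, -2, -1, 0, 0, 0, 1, 2, 1 };  // vertical gradient taps
    int Gx = 0, Gy = 0;
    for (int dy = 0; dy < 3; ++dy)
        for (int dx = 0; dx < 3; ++dx) {
            int p = input[(y + dy) * imgW + (x + dx)];
            Gx += sx[dy * 3 + dx] * p;
            Gy += sy[dy * 3 + dx] * p;
        }
    int sum = abs(Gx) + abs(Gy);
    output[y * imgW + x] = (unsigned char)(sum > 255 ? 255 : sum);
}

A matching launch would be, for example, dim3 threads(16, 16); dim3 blocks((imgW + 15) / 16, (imgH + 15) / 16);.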
031539f4f009e6cffc00424a7d5fa1da636d624d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <malloc.h>

#define CUDA_CHECK_RETURN(value) {\
    hipError_t _m_cudaStat = value;\
    if (_m_cudaStat != hipSuccess) {\
        fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
            hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }\
} // macro for error handling

const int N = 1 << 20;

__global__ void gInitVectors(double* vector1, double* vector2)
{
    for (int i = 0; i < N; i++) {
        vector1[i] = (double)i; //rand();
        vector2[i] = (double)i;
    }
}

__global__ void gVectorAddition(double* vector1, double* vector2, double* vectorSum, int threads_cnt)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N) return;
    vectorSum[i] = vector1[i] + vector2[i];
}

float testingThreadsOfDevice(int threads_cnt, int type_time)
{
    double *vectorSum_d, *vectorSum_h;
    vectorSum_h = (double*) calloc(N, sizeof(double));
    CUDA_CHECK_RETURN(hipMalloc((void**)&vectorSum_d, N * sizeof(double)));

    double *vector1_d, *vector2_d;
    CUDA_CHECK_RETURN(hipMalloc((void**)&vector1_d, N * sizeof(double)));
    CUDA_CHECK_RETURN(hipMalloc((void**)&vector2_d, N * sizeof(double)));

    hipLaunchKernelGGL(( gInitVectors) , dim3(1), dim3(32) , 0, 0, vector1_d, vector2_d);
    CUDA_CHECK_RETURN(hipGetLastError());

    float elapsedTime;
    struct timespec mt1, mt2; // for type_time = 1
    hipEvent_t start, stop;   // for type_time = 2

    if (type_time == 1) {
        clock_gettime(CLOCK_REALTIME, &mt1);
    } else {
        hipEventCreate(&start);  // initialization
        hipEventCreate(&stop);   // of the events
        hipEventRecord(start,0); // register (record) the start event
    }

    hipLaunchKernelGGL(( gVectorAddition) , dim3(N / threads_cnt), dim3(threads_cnt) , 0, 0, vector1_d, vector2_d, vectorSum_d, threads_cnt); // launch the function on the GPU
    hipDeviceSynchronize(); // synchronize the threads

    if (type_time == 1) {
        clock_gettime(CLOCK_REALTIME, &mt2);
        elapsedTime = (float)(mt2.tv_sec - mt1.tv_sec) * 1000.0f + (float)(mt2.tv_nsec - mt1.tv_nsec) / 1e6; /// time in milliseconds (s * 1000 + ns / 1e6)
    } else {
        hipEventRecord(stop,0);    // record the stop event
        hipEventSynchronize(stop); // synchronize on the event
        CUDA_CHECK_RETURN(hipGetLastError());
        hipEventElapsedTime(&elapsedTime,start,stop); // compute the elapsed time
        hipEventDestroy(start);    // release
        hipEventDestroy(stop);     // the events
        CUDA_CHECK_RETURN(hipGetLastError());
    }

    printf("blocks = %d, threads per block = %d milliseconds = %e \n", N / threads_cnt, threads_cnt, elapsedTime);

    /// check:
    ///
    /*hipMemcpy(vectorSum_h, vectorSum_d, N * sizeof(double), hipMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        fprintf(stderr, "%g ", vectorSum_h[i]);
    printf("\n"); */

    hipFree(vector1_d);
    hipFree(vector2_d);
    hipFree(vectorSum_d);
    free(vectorSum_h);
    return elapsedTime;
}

int main()
{
    for (int type_time = 1; type_time <= 2; type_time++) {
        float min_time, max_time, avg_time, cnt_tests = 1;
        // run the test with 32 threads per block:
        min_time = max_time = avg_time = testingThreadsOfDevice(32, type_time);
        for (int i = 64; i <= 1024; i *= 2) {
            float new_time = testingThreadsOfDevice(i, type_time);
            if (new_time > max_time) max_time = new_time;
            if (new_time < min_time) min_time = new_time;
            avg_time += new_time;
            cnt_tests++;
        }
        avg_time = avg_time / cnt_tests;
        if (type_time == 1) printf("\n time in milliseconds by clock_gettime:\n");
        else printf("\n time in milliseconds by Events:\n");
        printf("\t avg_time = %e min_time = %e max_time = %e\n\n", avg_time, min_time, max_time);
    }
    return 0;
}
031539f4f009e6cffc00424a7d5fa1da636d624d.cu
#include <stdio.h>
#include <time.h>
#include <malloc.h>

#define CUDA_CHECK_RETURN(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
            cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    }\
} // macro for error handling

const int N = 1 << 20;

__global__ void gInitVectors(double* vector1, double* vector2)
{
    for (int i = 0; i < N; i++) {
        vector1[i] = (double)i; //rand();
        vector2[i] = (double)i;
    }
}

__global__ void gVectorAddition(double* vector1, double* vector2, double* vectorSum, int threads_cnt)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N) return;
    vectorSum[i] = vector1[i] + vector2[i];
}

float testingThreadsOfDevice(int threads_cnt, int type_time)
{
    double *vectorSum_d, *vectorSum_h;
    vectorSum_h = (double*) calloc(N, sizeof(double));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&vectorSum_d, N * sizeof(double)));

    double *vector1_d, *vector2_d;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&vector1_d, N * sizeof(double)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&vector2_d, N * sizeof(double)));

    gInitVectors <<< 1, 32 >>> (vector1_d, vector2_d);
    CUDA_CHECK_RETURN(cudaGetLastError());

    float elapsedTime;
    struct timespec mt1, mt2; // for type_time = 1
    cudaEvent_t start, stop;  // for type_time = 2

    if (type_time == 1) {
        clock_gettime(CLOCK_REALTIME, &mt1);
    } else {
        cudaEventCreate(&start);  // initialization
        cudaEventCreate(&stop);   // of the events
        cudaEventRecord(start,0); // register (record) the start event
    }

    gVectorAddition <<< N / threads_cnt, threads_cnt >>> (vector1_d, vector2_d, vectorSum_d, threads_cnt); // launch the function on the GPU
    cudaDeviceSynchronize(); // synchronize the threads

    if (type_time == 1) {
        clock_gettime(CLOCK_REALTIME, &mt2);
        elapsedTime = (float)(mt2.tv_sec - mt1.tv_sec) * 1000.0f + (float)(mt2.tv_nsec - mt1.tv_nsec) / 1e6; /// time in milliseconds (s * 1000 + ns / 1e6)
    } else {
        cudaEventRecord(stop,0);    // record the stop event
        cudaEventSynchronize(stop); // synchronize on the event
        CUDA_CHECK_RETURN(cudaGetLastError());
        cudaEventElapsedTime(&elapsedTime,start,stop); // compute the elapsed time
        cudaEventDestroy(start);    // release
        cudaEventDestroy(stop);     // the events
        CUDA_CHECK_RETURN(cudaGetLastError());
    }

    printf("blocks = %d, threads per block = %d milliseconds = %e \n", N / threads_cnt, threads_cnt, elapsedTime);

    /// check:
    ///
    /*cudaMemcpy(vectorSum_h, vectorSum_d, N * sizeof(double), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        fprintf(stderr, "%g ", vectorSum_h[i]);
    printf("\n"); */

    cudaFree(vector1_d);
    cudaFree(vector2_d);
    cudaFree(vectorSum_d);
    free(vectorSum_h);
    return elapsedTime;
}

int main()
{
    for (int type_time = 1; type_time <= 2; type_time++) {
        float min_time, max_time, avg_time, cnt_tests = 1;
        // run the test with 32 threads per block:
        min_time = max_time = avg_time = testingThreadsOfDevice(32, type_time);
        for (int i = 64; i <= 1024; i *= 2) {
            float new_time = testingThreadsOfDevice(i, type_time);
            if (new_time > max_time) max_time = new_time;
            if (new_time < min_time) min_time = new_time;
            avg_time += new_time;
            cnt_tests++;
        }
        avg_time = avg_time / cnt_tests;
        if (type_time == 1) printf("\n time in milliseconds by clock_gettime:\n");
        else printf("\n time in milliseconds by Events:\n");
        printf("\t avg_time = %e min_time = %e max_time = %e\n\n", avg_time, min_time, max_time);
    }
    return 0;
}
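A side note on the benchmark pair above: the launch uses N / threads_cnt blocks, which only covers every element because N = 1 << 20 happens to be a multiple of each tested block size. The usual generalisation rounds the grid size up. The sketch below is illustrative and not part of the original files; the names are hypothetical.

// Minimal sketch, assuming device pointers a, b, c of length n; the rounded-up
// grid size also covers n values that are not a multiple of the block size.
#include <cuda_runtime.h>

__global__ void vecAdd(const double *a, const double *b, double *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];              // the guard handles the ragged tail block
}

void launchVecAdd(const double *a, const double *b, double *c, int n, int threads)
{
    int blocks = (n + threads - 1) / threads;   // round up instead of n / threads
    vecAdd<<<blocks, threads>>>(a, b, c, n);
}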
e7ee7f0c15f5a76de131df6f2a46a14289091a29.hip
// !!! This is a file automatically generated by hipify!!! #include "Geometry/LibraryCUDA.cuh" #include "Geometry/Constants.hh" #include "Simulation/ScreenFunctionsCUDA.cuh" #include "Simulation/TrackGPU.cuh" #include "Simulation/Spawner.cuh" #include <cfloat> namespace na63 { namespace { __device__ __constant__ Float facFel = 5.21575064289; __device__ __constant__ Float preS1 = 1./(184.15*184.15); __device__ __constant__ Float Egsmall = 2.0 * MeV; __device__ inline Float Phi1(const Float delta) { Float screenVal; if (delta > 1.) screenVal = 21.12 - 4.184*logf(delta+0.952); else screenVal = 20.868 - delta*(3.242 - 0.625*delta); return screenVal; } __device__ inline Float Phi2(const Float delta) { Float screenVal; if (delta > 1.) screenVal = 21.12 - 4.184*logf(delta+0.952); else screenVal = 20.209 - delta*(1.930 + 0.086*delta); return screenVal; } typedef struct { Float xiLPM; Float phiLPM; Float gLPM; } LPMFunctions; __device__ LPMFunctions CalcLPMFunctions( const Float k, const Float eplusEnergy, const Float lpmEnergy, const Float Z) { LPMFunctions ret; // *** calculate lpm variable s & sprime *** // Klein eqs. (78) & (79) Float sprime = sqrt(0.125*k*lpmEnergy/(eplusEnergy*(k-eplusEnergy))); Float s1 = preS1*pow(Z,Float(0.6666666)); Float logS1 = 2./3.*logf(Z)-2.*facFel; Float logTS1 = 0.69314718056 + logS1; ret.xiLPM = 2.; if (sprime>1) ret.xiLPM = 1.; else if (sprime>sqrt(2.)*s1) { Float h = logf(sprime)/logTS1; ret.xiLPM = 1+h-0.08*(1-h)*(1-sqrt(1-h))/logTS1; } Float s0 = sprime/sqrt(ret.xiLPM); // G4cout<<"k="<<k<<" y="<<eplusEnergy/k<<G4endl; // G4cout<<"s0="<<s0<<G4endl; // *** calculate supression functions phi and G *** // Klein eqs. (77) Float s2=s0*s0; Float s3=s0*s2; Float s4=s2*s2; if (s0<0.1) { // high suppression limit ret.phiLPM = 6.*s0 - 18.84955592153876*s2 + 39.47841760435743*s3 - 57.69873135166053*s4; ret.gLPM = 37.69911184307752*s2 - 236.8705056261446*s3 + 807.7822389*s4; } else if (s0<1.9516) { // intermediate suppression // using eq.77 approxim. valid s0<2. ret.phiLPM = 1.-exp(-6.*s0*(1.+(3.-kPi)*s0) +s3/(0.623+0.795*s0+0.658*s2)); if (s0<0.415827397755) { // using eq.77 approxim. valid 0.07<s<2 Float psiLPM = 1-exp(-4*s0-8*s2/(1+3.936*s0+4.97*s2-0.05*s3+7.50*s4)); ret.gLPM = 3*psiLPM-2*ret.phiLPM; } else { // using alternative parametrisiation Float pre = -0.16072300849123999 + s0*3.7550300067531581 + s2*-1.7981383069010097 + s3*0.67282686077812381 + s4*-0.1207722909879257; ret.gLPM = tanh(pre); } } else { // low suppression limit valid s>2. ret.phiLPM = 1. - 0.0119048/s4; ret.gLPM = 1. - 0.0230655/s4; } // *** make sure suppression is smaller than 1 *** // *** caused by Migdal approximation in xi *** if (ret.xiLPM*ret.phiLPM>1. || s0>0.57) ret.xiLPM=1./ret.phiLPM; return ret; } __device__ void SampleSecondaries( GPUTrack* track, // MDJ: replaces MaterialCutsCouple kinda wrong, be aware... const ParticlePars* particle, const MaterialPars* material, const int index, const int child_index, hiprandState_t *rng_state, Float cut_energy = 2*kElectronMass, Float max_energy = FLT_MAX, bool fLPMflag = true) { // The secondaries e+e- energies are sampled using the Bethe - Heitler // cross sections with Coulomb correction. // A modified version of the random number techniques of Butcher & Messel // is used (Nuc Phys 20(1960),15). // // GEANT4 internal units. // // Note 1 : Effects due to the breakdown of the Born approximation at // low energy are ignored. 
// Note 2 : The differential cross section implicitly takes account of // pair creation in both nuclear and atomic electron fields. // However triplet prodution is not generated. Float GammaEnergy = track->momentum[3] - particle->mass; GPUThreeVector GammaDirection; ThreeVector_Normalized(GammaDirection,track->momentum); Float epsil; Float epsil0 = kElectronMass/GammaEnergy; if (epsil0 > 1.0) return; Float lpmEnergy = material->radiation_length*material->density*kLPMConstant; // select randomly one element constituing the material // const G4Element* anElement = SelectRandomAtom(aMaterial, theGamma, GammaEnergy); if (GammaEnergy < Egsmall) { epsil = epsil0 + (0.5-epsil0)*hiprand_uniform(rng_state); } else { // now comes the case with GammaEnergy >= 2. MeV // Extract Coulomb factor for this Element Float Z3 = pow(material->atomic_number,3); Float FZ = 8.*(Z3); if (GammaEnergy > 50.*MeV) FZ += 8.*(material->coulomb_correction); // limits of the screening variable Float screenfac = 136.*epsil0/(Z3); Float screenmax = exp((42.24 - FZ)/8.368) - 0.952 ; Float temp = 4.0*screenfac; Float screenmin = min(temp,screenmax); // limits of the energy sampling Float epsil1 = 0.5 - 0.5*sqrt(1. - screenmin/screenmax) ; Float epsilmin = max(epsil0,epsil1) , epsilrange = 0.5 - epsilmin; // // sample the energy rate of the created electron (or positron) // //Float epsil, screenvar, greject ; Float screenvar, greject; Float F10 = CUDA_ScreenFunction1(screenmin) - FZ; Float F20 = CUDA_ScreenFunction2(screenmin) - FZ; temp = 0.0; Float NormF1 = max(F10*epsilrange*epsilrange,temp); Float NormF2 = max(1.5*F20,0.); LPMFunctions lpm; do { if (NormF1/(NormF1+NormF2) > hiprand_uniform(rng_state)) { epsil = 0.5 - epsilrange*pow(hiprand_uniform(rng_state),Float(0.333333)); screenvar = screenfac/(epsil*(1-epsil)); if (fLPMflag && GammaEnergy>100.*GeV) { lpm = CalcLPMFunctions(GammaEnergy,GammaEnergy*epsil,lpmEnergy,Z); greject = lpm.xiLPM*((lpm.gLPM+2.*lpm.phiLPM)*Phi1(screenvar) - lpm.gLPM*Phi2(screenvar) - lpm.phiLPM*FZ)/F10; } else { greject = (CUDA_ScreenFunction1(screenvar) - FZ)/F10; } } else { epsil = epsilmin + epsilrange*hiprand_uniform(rng_state); screenvar = screenfac/(epsil*(1-epsil)); if (fLPMflag && GammaEnergy>100.*GeV) { lpm = CalcLPMFunctions(GammaEnergy,GammaEnergy*epsil,lpmEnergy,Z); greject = lpm.xiLPM*((0.5*lpm.gLPM+lpm.phiLPM)*Phi1(screenvar) + 0.5*lpm.gLPM*Phi2(screenvar) - 0.5*(lpm.gLPM+lpm.phiLPM)*FZ)/F20; // printf("gLPM: %f, phiLPM: %f, xiLPM: %f\n",gLPM,phiLPM,xiLPM); } else { greject = (CUDA_ScreenFunction2(screenvar) - FZ)/F20; } } } while (greject < hiprand_uniform(rng_state)); } // end of epsil sampling // // fixe charges randomly // Float ElectTotEnergy, PositTotEnergy; if (hiprand_uniform(rng_state) > 0.5) { ElectTotEnergy = (1.-epsil)*GammaEnergy; PositTotEnergy = epsil*GammaEnergy; } else { PositTotEnergy = (1.-epsil)*GammaEnergy; ElectTotEnergy = epsil*GammaEnergy; } // // scattered electron (positron) angles. ( Z - axis along the parent photon) // // universal distribution suggested by L. Urban // (Geant3 manual (1993) Phys211), // derived from Tsai distribution (Rev Mod Phys 49,421(1977)) Float u; const Float a1 = 0.625 , a2 = 3.*a1 , d = 27. 
; if (9./(9.+d) > hiprand_uniform(rng_state)) u= - logf(hiprand_uniform(rng_state)*hiprand_uniform(rng_state))/a1; else u= - logf(hiprand_uniform(rng_state)*hiprand_uniform(rng_state))/a2; Float TetEl = u*kElectronMass/ElectTotEnergy; Float TetPo = u*kElectronMass/PositTotEnergy; Float Phi = 2.0 * kPi * hiprand_uniform(rng_state); Float dxEl= sin(TetEl)*cos(Phi),dyEl= sin(TetEl)*sin(Phi),dzEl=cos(TetEl); Float dxPo=-sin(TetPo)*cos(Phi),dyPo=-sin(TetPo)*sin(Phi),dzPo=cos(TetPo); // // kinematic of the created pair // // the electron and positron are assumed to have a symetric // angular distribution with respect to the Z axis along the parent photon. Float temp = 0.0; Float ElectKineEnergy = max(temp,ElectTotEnergy - kElectronMass); GPUThreeVector ElectDirection; ThreeVector_Set(ElectDirection,dxEl,dyEl,dzEl); ThreeVector_Rotate(ElectDirection,GammaDirection); ThreeVector_Extend(ElectDirection,sqrt(ElectTotEnergy*ElectTotEnergy-kElectronMass*kElectronMass)); // create G4DynamicParticle object for the particle1 // G4DynamicParticle* aParticle1= new G4DynamicParticle( // theElectron,ElectDirection,ElectKineEnergy); GPUTrack *aParticle1 = &tracks[child_index]; aParticle1->particle_id = 11; aParticle1->particle_index = electron_index; aParticle1->charge = -1; FourVector_Set(aParticle1->momentum,ElectDirection,ElectTotEnergy); // the e+ is always created (even with Ekine=0) for further annihilation. temp = 0.0; Float PositKineEnergy = max(temp,PositTotEnergy - kElectronMass); GPUThreeVector PositDirection; ThreeVector_Set(PositDirection,dxPo,dyPo,dzPo); ThreeVector_Rotate(PositDirection,GammaDirection); ThreeVector_Extend(PositDirection,sqrt(PositTotEnergy*PositTotEnergy-kElectronMass*kElectronMass)); // create G4DynamicParticle object for the particle2 // G4DynamicParticle* aParticle2= new G4DynamicParticle( // thePositron,PositDirection,PositKineEnergy); GPUTrack *aParticle2 = &tracks[child_index - 1]; aParticle2->particle_id = 11; aParticle2->particle_index = electron_index; aParticle2->charge = 1; FourVector_Set(aParticle2->momentum,PositDirection,PositTotEnergy); // Kill photon CUDA_SetEnergy(track->momentum,0.0,0.0,index); // Spawn children SpawnChild(*track,child_index,kElectronMass); SpawnChild(*track,child_index-1,kElectronMass); } } // End unnamed namespace __device__ void CUDA_GEANT4PairProduction( GPUTrack* track, const ParticlePars* particle, const MaterialPars* material, const Float dl, hiprandState_t *rng_state, const int index) { if (track->particle_id != 22) return; int child_index = CanHaveTwoChildren(index); // Must be able to spawn two children if (child_index == -1) { UpdateState(index,WAITING); return; } // Use radiation length for probability Float chance_to_interact = 1 - exp(-dl/material->radiation_length); if (hiprand_uniform(rng_state) > chance_to_interact) return; SampleSecondaries(track,particle,material,index,child_index,rng_state); } } // End namespace na63
e7ee7f0c15f5a76de131df6f2a46a14289091a29.cu
#include "Geometry/LibraryCUDA.cuh" #include "Geometry/Constants.hh" #include "Simulation/ScreenFunctionsCUDA.cuh" #include "Simulation/TrackGPU.cuh" #include "Simulation/Spawner.cuh" #include <cfloat> namespace na63 { namespace { __device__ __constant__ Float facFel = 5.21575064289; __device__ __constant__ Float preS1 = 1./(184.15*184.15); __device__ __constant__ Float Egsmall = 2.0 * MeV; __device__ inline Float Phi1(const Float delta) { Float screenVal; if (delta > 1.) screenVal = 21.12 - 4.184*logf(delta+0.952); else screenVal = 20.868 - delta*(3.242 - 0.625*delta); return screenVal; } __device__ inline Float Phi2(const Float delta) { Float screenVal; if (delta > 1.) screenVal = 21.12 - 4.184*logf(delta+0.952); else screenVal = 20.209 - delta*(1.930 + 0.086*delta); return screenVal; } typedef struct { Float xiLPM; Float phiLPM; Float gLPM; } LPMFunctions; __device__ LPMFunctions CalcLPMFunctions( const Float k, const Float eplusEnergy, const Float lpmEnergy, const Float Z) { LPMFunctions ret; // *** calculate lpm variable s & sprime *** // Klein eqs. (78) & (79) Float sprime = sqrt(0.125*k*lpmEnergy/(eplusEnergy*(k-eplusEnergy))); Float s1 = preS1*pow(Z,Float(0.6666666)); Float logS1 = 2./3.*logf(Z)-2.*facFel; Float logTS1 = 0.69314718056 + logS1; ret.xiLPM = 2.; if (sprime>1) ret.xiLPM = 1.; else if (sprime>sqrt(2.)*s1) { Float h = logf(sprime)/logTS1; ret.xiLPM = 1+h-0.08*(1-h)*(1-sqrt(1-h))/logTS1; } Float s0 = sprime/sqrt(ret.xiLPM); // G4cout<<"k="<<k<<" y="<<eplusEnergy/k<<G4endl; // G4cout<<"s0="<<s0<<G4endl; // *** calculate supression functions phi and G *** // Klein eqs. (77) Float s2=s0*s0; Float s3=s0*s2; Float s4=s2*s2; if (s0<0.1) { // high suppression limit ret.phiLPM = 6.*s0 - 18.84955592153876*s2 + 39.47841760435743*s3 - 57.69873135166053*s4; ret.gLPM = 37.69911184307752*s2 - 236.8705056261446*s3 + 807.7822389*s4; } else if (s0<1.9516) { // intermediate suppression // using eq.77 approxim. valid s0<2. ret.phiLPM = 1.-exp(-6.*s0*(1.+(3.-kPi)*s0) +s3/(0.623+0.795*s0+0.658*s2)); if (s0<0.415827397755) { // using eq.77 approxim. valid 0.07<s<2 Float psiLPM = 1-exp(-4*s0-8*s2/(1+3.936*s0+4.97*s2-0.05*s3+7.50*s4)); ret.gLPM = 3*psiLPM-2*ret.phiLPM; } else { // using alternative parametrisiation Float pre = -0.16072300849123999 + s0*3.7550300067531581 + s2*-1.7981383069010097 + s3*0.67282686077812381 + s4*-0.1207722909879257; ret.gLPM = tanh(pre); } } else { // low suppression limit valid s>2. ret.phiLPM = 1. - 0.0119048/s4; ret.gLPM = 1. - 0.0230655/s4; } // *** make sure suppression is smaller than 1 *** // *** caused by Migdal approximation in xi *** if (ret.xiLPM*ret.phiLPM>1. || s0>0.57) ret.xiLPM=1./ret.phiLPM; return ret; } __device__ void SampleSecondaries( GPUTrack* track, // MDJ: replaces MaterialCutsCouple kinda wrong, be aware... const ParticlePars* particle, const MaterialPars* material, const int index, const int child_index, curandState *rng_state, Float cut_energy = 2*kElectronMass, Float max_energy = FLT_MAX, bool fLPMflag = true) { // The secondaries e+e- energies are sampled using the Bethe - Heitler // cross sections with Coulomb correction. // A modified version of the random number techniques of Butcher & Messel // is used (Nuc Phys 20(1960),15). // // GEANT4 internal units. // // Note 1 : Effects due to the breakdown of the Born approximation at // low energy are ignored. // Note 2 : The differential cross section implicitly takes account of // pair creation in both nuclear and atomic electron fields. // However triplet prodution is not generated. 
Float GammaEnergy = track->momentum[3] - particle->mass; GPUThreeVector GammaDirection; ThreeVector_Normalized(GammaDirection,track->momentum); Float epsil; Float epsil0 = kElectronMass/GammaEnergy; if (epsil0 > 1.0) return; Float lpmEnergy = material->radiation_length*material->density*kLPMConstant; // select randomly one element constituing the material // const G4Element* anElement = SelectRandomAtom(aMaterial, theGamma, GammaEnergy); if (GammaEnergy < Egsmall) { epsil = epsil0 + (0.5-epsil0)*curand_uniform(rng_state); } else { // now comes the case with GammaEnergy >= 2. MeV // Extract Coulomb factor for this Element Float Z3 = pow(material->atomic_number,3); Float FZ = 8.*(Z3); if (GammaEnergy > 50.*MeV) FZ += 8.*(material->coulomb_correction); // limits of the screening variable Float screenfac = 136.*epsil0/(Z3); Float screenmax = exp((42.24 - FZ)/8.368) - 0.952 ; Float temp = 4.0*screenfac; Float screenmin = min(temp,screenmax); // limits of the energy sampling Float epsil1 = 0.5 - 0.5*sqrt(1. - screenmin/screenmax) ; Float epsilmin = max(epsil0,epsil1) , epsilrange = 0.5 - epsilmin; // // sample the energy rate of the created electron (or positron) // //Float epsil, screenvar, greject ; Float screenvar, greject; Float F10 = CUDA_ScreenFunction1(screenmin) - FZ; Float F20 = CUDA_ScreenFunction2(screenmin) - FZ; temp = 0.0; Float NormF1 = max(F10*epsilrange*epsilrange,temp); Float NormF2 = max(1.5*F20,0.); LPMFunctions lpm; do { if (NormF1/(NormF1+NormF2) > curand_uniform(rng_state)) { epsil = 0.5 - epsilrange*pow(curand_uniform(rng_state),Float(0.333333)); screenvar = screenfac/(epsil*(1-epsil)); if (fLPMflag && GammaEnergy>100.*GeV) { lpm = CalcLPMFunctions(GammaEnergy,GammaEnergy*epsil,lpmEnergy,Z); greject = lpm.xiLPM*((lpm.gLPM+2.*lpm.phiLPM)*Phi1(screenvar) - lpm.gLPM*Phi2(screenvar) - lpm.phiLPM*FZ)/F10; } else { greject = (CUDA_ScreenFunction1(screenvar) - FZ)/F10; } } else { epsil = epsilmin + epsilrange*curand_uniform(rng_state); screenvar = screenfac/(epsil*(1-epsil)); if (fLPMflag && GammaEnergy>100.*GeV) { lpm = CalcLPMFunctions(GammaEnergy,GammaEnergy*epsil,lpmEnergy,Z); greject = lpm.xiLPM*((0.5*lpm.gLPM+lpm.phiLPM)*Phi1(screenvar) + 0.5*lpm.gLPM*Phi2(screenvar) - 0.5*(lpm.gLPM+lpm.phiLPM)*FZ)/F20; // printf("gLPM: %f, phiLPM: %f, xiLPM: %f\n",gLPM,phiLPM,xiLPM); } else { greject = (CUDA_ScreenFunction2(screenvar) - FZ)/F20; } } } while (greject < curand_uniform(rng_state)); } // end of epsil sampling // // fixe charges randomly // Float ElectTotEnergy, PositTotEnergy; if (curand_uniform(rng_state) > 0.5) { ElectTotEnergy = (1.-epsil)*GammaEnergy; PositTotEnergy = epsil*GammaEnergy; } else { PositTotEnergy = (1.-epsil)*GammaEnergy; ElectTotEnergy = epsil*GammaEnergy; } // // scattered electron (positron) angles. ( Z - axis along the parent photon) // // universal distribution suggested by L. Urban // (Geant3 manual (1993) Phys211), // derived from Tsai distribution (Rev Mod Phys 49,421(1977)) Float u; const Float a1 = 0.625 , a2 = 3.*a1 , d = 27. 
; if (9./(9.+d) > curand_uniform(rng_state)) u= - logf(curand_uniform(rng_state)*curand_uniform(rng_state))/a1; else u= - logf(curand_uniform(rng_state)*curand_uniform(rng_state))/a2; Float TetEl = u*kElectronMass/ElectTotEnergy; Float TetPo = u*kElectronMass/PositTotEnergy; Float Phi = 2.0 * kPi * curand_uniform(rng_state); Float dxEl= sin(TetEl)*cos(Phi),dyEl= sin(TetEl)*sin(Phi),dzEl=cos(TetEl); Float dxPo=-sin(TetPo)*cos(Phi),dyPo=-sin(TetPo)*sin(Phi),dzPo=cos(TetPo); // // kinematic of the created pair // // the electron and positron are assumed to have a symetric // angular distribution with respect to the Z axis along the parent photon. Float temp = 0.0; Float ElectKineEnergy = max(temp,ElectTotEnergy - kElectronMass); GPUThreeVector ElectDirection; ThreeVector_Set(ElectDirection,dxEl,dyEl,dzEl); ThreeVector_Rotate(ElectDirection,GammaDirection); ThreeVector_Extend(ElectDirection,sqrt(ElectTotEnergy*ElectTotEnergy-kElectronMass*kElectronMass)); // create G4DynamicParticle object for the particle1 // G4DynamicParticle* aParticle1= new G4DynamicParticle( // theElectron,ElectDirection,ElectKineEnergy); GPUTrack *aParticle1 = &tracks[child_index]; aParticle1->particle_id = 11; aParticle1->particle_index = electron_index; aParticle1->charge = -1; FourVector_Set(aParticle1->momentum,ElectDirection,ElectTotEnergy); // the e+ is always created (even with Ekine=0) for further annihilation. temp = 0.0; Float PositKineEnergy = max(temp,PositTotEnergy - kElectronMass); GPUThreeVector PositDirection; ThreeVector_Set(PositDirection,dxPo,dyPo,dzPo); ThreeVector_Rotate(PositDirection,GammaDirection); ThreeVector_Extend(PositDirection,sqrt(PositTotEnergy*PositTotEnergy-kElectronMass*kElectronMass)); // create G4DynamicParticle object for the particle2 // G4DynamicParticle* aParticle2= new G4DynamicParticle( // thePositron,PositDirection,PositKineEnergy); GPUTrack *aParticle2 = &tracks[child_index - 1]; aParticle2->particle_id = 11; aParticle2->particle_index = electron_index; aParticle2->charge = 1; FourVector_Set(aParticle2->momentum,PositDirection,PositTotEnergy); // Kill photon CUDA_SetEnergy(track->momentum,0.0,0.0,index); // Spawn children SpawnChild(*track,child_index,kElectronMass); SpawnChild(*track,child_index-1,kElectronMass); } } // End unnamed namespace __device__ void CUDA_GEANT4PairProduction( GPUTrack* track, const ParticlePars* particle, const MaterialPars* material, const Float dl, curandState *rng_state, const int index) { if (track->particle_id != 22) return; int child_index = CanHaveTwoChildren(index); // Must be able to spawn two children if (child_index == -1) { UpdateState(index,WAITING); return; } // Use radiation length for probability Float chance_to_interact = 1 - exp(-dl/material->radiation_length); if (curand_uniform(rng_state) > chance_to_interact) return; SampleSecondaries(track,particle,material,index,child_index,rng_state); } } // End namespace na63
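The epsil sampling in SampleSecondaries above follows the classic rejection pattern: propose a value, compute an acceptance weight greject, and loop while greject < curand_uniform(rng_state). Stripped of the Bethe-Heitler details, the pattern looks like the sketch below; propose() and accept_weight() are hypothetical stand-ins, not the physics expressions from the file.

// Minimal sketch of rejection sampling with cuRAND; the proposal and the
// acceptance weight here are placeholders chosen only so the loop terminates.
#include <curand_kernel.h>

__device__ float propose(curandState *rng)      { return 0.5f * curand_uniform(rng); }
__device__ float accept_weight(float candidate) { return 1.0f - candidate; }

__device__ float SampleByRejection(curandState *rng)
{
    float candidate, greject;
    do {
        candidate = propose(rng);               // draw from the proposal density
        greject   = accept_weight(candidate);   // acceptance weight in [0, 1]
    } while (greject < curand_uniform(rng));    // retry until accepted
    return candidate;
}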
feae70899b9b7ca60e7389f7fa75e1092c01cbbc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif

void __global__ file2_kernel(int x, int& r) { r = -x; }

EXPORT int file2_launch_kernel(int x)
{
  int r = 0;
  hipLaunchKernelGGL(( file2_kernel), dim3(1), dim3(1), 0, 0, x, r);
  return r;
}
feae70899b9b7ca60e7389f7fa75e1092c01cbbc.cu
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif

void __global__ file2_kernel(int x, int& r) { r = -x; }

EXPORT int file2_launch_kernel(int x)
{
  int r = 0;
  file2_kernel<<<1, 1>>>(x, r);
  return r;
}
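One caveat about the tiny pair above: file2_kernel takes int& r, so the launch hands the device an address of host stack memory and the caller's r is never actually updated. That is harmless for a compile-and-link test stub, but it is not a way to get a result back. If the value were needed on the host, a common pattern is to round-trip through device memory, as in this illustrative sketch (the names are hypothetical, not from the file):

// Minimal sketch, not part of the original file: return the kernel's result
// through device memory instead of a reference parameter.
#include <cuda_runtime.h>

__global__ void negate_kernel(int x, int *r) { *r = -x; }

int negate_on_device(int x)
{
    int result = 0;
    int *d_r = nullptr;
    cudaMalloc(&d_r, sizeof(int));                                  // device-side result slot
    negate_kernel<<<1, 1>>>(x, d_r);
    cudaMemcpy(&result, d_r, sizeof(int), cudaMemcpyDeviceToHost);  // copy synchronizes with the kernel
    cudaFree(d_r);
    return result;
}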
6fe09d1bbb95bb41bf1c088e4234d138190fbf8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Copyright (c) 2012, Mikhail Sirotenko <[email protected]> //All rights reserved. // //Redistribution and use in source and binary forms, with or without //modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND //ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED //WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE //DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY //DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES //(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND //ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT //(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS //SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <stdexcept> #include <rocblas.h> #include "../precomp.hpp" namespace cudacnn { template class FLayer<TensorGPU, float, TansigMod<float> >; template class FLayer<TensorGPU, float, Tansig<float> >; template class FLayer<TensorGPU, float, Purelin<float> >; template class FLayer<TensorGPU, double, TansigMod<double> >; template class FLayer<TensorGPU, double, Tansig<double> >; template class FLayer<TensorGPU, double, Purelin<double> >; #ifdef HAVE_CUDA template<class T> void ApplyWeightsTemplate(const hipblasHandle_t& handle, const TensorGPU<T>& layer_input, const TensorGPU<T>& weights, const TensorGPU<T>& biases, TensorGPU<T>& out ); template<> void ApplyWeightsTemplate<float>(const hipblasHandle_t& handle, const TensorGPU<float>& layer_input, const TensorGPU<float>& weights, const TensorGPU<float>& biases, TensorGPU<float>& out ) { //Copy biases to out because of sgemm() syntax uses C as a bias and the output cutilSafeCall(hipMemcpy(out.data(),biases.data(),sizeof(float)*biases.num_elements(), hipMemcpyDeviceToDevice)); //Flatten the input const float alpha = 1.0; const float beta = 1.0; hipblasStatus_t ret = hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 1,weights.w(), layer_input.num_elements(), &alpha, layer_input.data(), layer_input.num_elements(), weights.data(), weights.w(), &beta, out.data(), out.h()); cublasCheckMsg(ret, "cublas Sgemm returned an error!\n"); } template<> void ApplyWeightsTemplate<double>(const hipblasHandle_t& handle, const TensorGPU<double>& layer_input, const TensorGPU<double>& weights, const TensorGPU<double>& biases, TensorGPU<double>& out ) { //Copy biases to out because of sgemm() syntax uses C as a bias and the output cutilSafeCall(hipMemcpy(out.data(),biases.data(),sizeof(double)*biases.num_elements(), hipMemcpyDeviceToDevice)); //Flatten the input const double alpha = 1.0; const double beta = 1.0; hipblasStatus_t ret = hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 1,weights.w(), layer_input.num_elements(), &alpha, layer_input.data(), layer_input.num_elements(), weights.data(), weights.w(), &beta, out.data(), out.h()); cublasCheckMsg(ret, 
"cublas Sgemm returned an error!\n"); } template<class T, class TF> void FLayer<TensorGPU,T, TF>::Propagate(const TensorGPU<T>& layer_input ) { //Flatten the output of previous layer TensorGPU<T> flat_input(layer_input, true); flat_input.Flatten(); assert(flat_input.num_elements() == this->weights().h()); ApplyWeightsTemplate<T>(cublas_handle_, flat_input, this->weights(), this->biases(), this->out_); dim3 blocks(iDivUp(this->out().num_elements(),MAX_THREADS),1,1); dim3 threads(MAX_THREADS,1,1); hipLaunchKernelGGL(( ApplyTransferFunction<T,TF>), dim3(blocks), dim3(threads), 0, 0, this->out_, this->out_); } template <class T, int nthreads, bool hessian> __global__ void FLayerBackpropagateKernel(TensorDev<T> dedy, TensorDev<T> input, TensorDev<T> weights, TensorDev<T> de_dw, TensorDev<T> de_db, TensorDev<T> de_dx_prev) { //volatile __shared__ T smem[nthreads]; T* smem = SharedMemory<T>(); int tx = threadIdx.x; int by = blockIdx.y; int x = tx; int y = by; int tid = tx; smem[tid] = 0; if(x < dedy.num_elements()){ smem[tid] = hessian ? dedy[x]*Sqr(weights(x,y)) : dedy[x]*weights(x,y); //Gradients de_dw(x,y) = hessian ? de_dw(x,y) + dedy[x]*Sqr(input[y]): dedy[x]*input[y]; if(y == 0) de_db[x] = hessian ? de_db[x] + dedy[x] : dedy[x]; } volatile T* vsmem = smem; SmemReduce<T, nthreads>(vsmem, tid); __syncthreads(); //Copy to destination if(x==0) de_dx_prev[y] = smem[0]; __syncthreads(); } //Only compute derrivative without backpropagation template <class T, bool hessian> __global__ void FLayerComputeDerrivKernel(TensorDev<T> dedy, TensorDev<T> input, TensorDev<T> de_dw, TensorDev<T> de_db) { int tx = threadIdx.x; int by = blockIdx.y; int x = tx; int y = by; if(x < dedy.num_elements()){ //Gradients de_dw(x,y) = hessian ? de_dw(x,y) + dedy[x]*Sqr(input[y]): dedy[x]*input[y]; if(y == 0) de_db[x] = hessian ? de_db[x] + dedy[x] : dedy[x]; } } template <class T, class TF> template <bool hessian> inline void FLayer<TensorGPU,T, TF>::BackpropagateKernelProxy(const TensorGPU<T>& input, TensorGPU<T>& dedx_prev) { const TensorGPU<T>& de_dw_in = hessian ? this->d2e_dw2() : this->de_dw(); const TensorGPU<T>& de_db_in = hessian ? this->d2e_db2() : this->de_db(); const TensorGPU<T>& de_dx_in = hessian ? this->d2e_dx2() : this->de_dx(); dim3 threads(MAX_THREADS); dim3 blocks(iDivUp(de_dx_in.num_elements(), MAX_THREADS)); TensorGPU<T> dedy(de_dx_in); hipLaunchKernelGGL(( ApplyTransferFunctionDerriv<T, TF, hessian>), dim3(blocks), dim3(threads), 0, 0, this->out(), de_dx_in, dedy); //TODO: fix this. 
Weights width can be greather than 1024 int nthreads = iRoundUpPow2(this->weights().w()); threads = dim3(nthreads,1,1); blocks = dim3(1, this->weights().h(),1); size_t smem_size = ::max(nthreads*sizeof(T), 64*sizeof(T)); switch(threads.x) { case 1 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T, 1 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 2 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,2 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 4 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,4 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 8 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,8 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 16 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,16 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 32 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,32 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 64 :hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,64 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 128:hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,128 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 256:hipLaunchKernelGGL(( FLayerBackpropagateKernel<T,256 , hessian>), dim3(blocks), dim3(threads), smem_size, 0, dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; default: throw std::runtime_error("Incorrect threads number in BackpropagateKernelProxy"); } cutilCheckMsg("Failed to apply transfer function in FLayer"); } template <class T, class TF> void FLayer<TensorGPU,T, TF>::BackPropagate(const TensorGPU<T>& input, TensorGPU<T>& dedx_prev) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); TensorGPU<T> flat_dedx_prev(dedx_prev, true); flat_dedx_prev.Flatten(); assert(flat_input.num_elements() == this->weights().h()); assert(de_dw_.HaveSameSize(this->weights())); assert(de_db_.HaveSameSize(this->biases())); //Require only the same number of elements, since last CLayer usually flattened assert(flat_dedx_prev.num_elements() == flat_input.num_elements()); //TODO: Remove the limitation assert(weights().w() <= MAX_THREADS); BackpropagateKernelProxy<false>(flat_input, flat_dedx_prev); } template <class T, class TF> void FLayer<TensorGPU,T, TF>::BackPropagateHessian(const TensorGPU<T>& input, TensorGPU<T>& d2edx2_prev) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); TensorGPU<T> flat_d2edx2_prev(d2edx2_prev, true); flat_d2edx2_prev.Flatten(); assert(flat_input.num_elements() == this->weights().h()); assert(this->d2e_dw2_.HaveSameSize(this->weights())); assert(this->d2e_db2_.HaveSameSize(this->biases())); assert(flat_d2edx2_prev.num_elements() == flat_input.num_elements()); //Assume this for now. 
If the weights matrix is bigger than use cublas method assert(this->weights().w() <= MAX_THREADS); BackpropagateKernelProxy<true>(flat_input, flat_d2edx2_prev); //cutilSafeCall(hipDeviceSynchronize()); this->num_hessian_accums_++; } template <class T, class TF> void FLayer<TensorGPU,T, TF>::AverageHessian() { if(this->num_hessian_accums_) { dim3 threads(min(512,this->d2e_dw2_.num_elements()),1,1); dim3 blocks(iDivUp(this->d2e_dw2_.num_elements(),512)); hipLaunchKernelGGL(( Average<T>), dim3(blocks), dim3(threads), 0, 0, this->d2e_dw2_, this->num_hessian_accums_); threads = dim3(min(512,this->d2e_db2_.num_elements()),1,1); blocks = dim3(iDivUp(this->d2e_db2_.num_elements(),512)); hipLaunchKernelGGL(( Average<T>), dim3(blocks), dim3(threads), 0, 0, this->d2e_db2_, this->num_hessian_accums_); this->num_hessian_accums_ = 0; } } template <class T, class TF> void FLayer<TensorGPU,T, TF>::AdaptWeights(T tau, bool use_hessian, T mu) { dim3 threads(MAX_THREADS); if(use_hessian){ dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS)); hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->weights(), tau, mu, this->de_dw(), this->d2e_dw2()); blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS)); hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->biases(), tau, mu, this->de_db(), this->d2e_db2()); }else{ dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS)); hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->weights(), tau, this->de_dw()); blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS)); hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->biases(), tau, this->de_db()); } } template <class T, class TF> template <bool hessian> void FLayer<TensorGPU, T, TF>::ComputeGradientKernelProxy(const TensorGPU<T>& input) { const TensorGPU<T>& de_dw_in = hessian ? this->d2e_dw2() : this->de_dw(); const TensorGPU<T>& de_db_in = hessian ? this->d2e_db2() : this->de_db(); const TensorGPU<T>& de_dx_in = hessian ? this->d2e_dx2() : this->de_dx(); dim3 threads(MAX_THREADS); dim3 blocks(iDivUp(de_dx_in.num_elements(), MAX_THREADS)); TensorGPU<T> dedy(de_dx_in); hipLaunchKernelGGL(( ApplyTransferFunctionDerriv<T, TF, hessian>), dim3(blocks), dim3(threads), 0, 0, this->out(), de_dx_in, dedy); //TODO: fix this. Weights width can be greather than 1024 threads = dim3(iRoundUpPow2(this->weights().w()),1,1); blocks = dim3(1, this->weights().h(),1); hipLaunchKernelGGL(( FLayerComputeDerrivKernel<T, hessian>), dim3(blocks), dim3(threads), 0, 0, dedy, input, de_dw_in, de_db_in); cutilCheckMsg("Failed to compute derrivative in FLayer"); } /* Compute gradient without backpropagating errors */ template <class T, class TF> void FLayer<TensorGPU, T, TF>::ComputeGradient(const TensorGPU<T>& input) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); ComputeGradientKernelProxy<false>(flat_input); } /* Compute Hessian without backpropagating errors */ template <class T, class TF> void FLayer<TensorGPU, T, TF>::ComputeHessian(const TensorGPU<T>& input) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); ComputeGradientKernelProxy<true>(flat_input); this->num_hessian_accums_++; } #endif //HAVE_CUDA }
6fe09d1bbb95bb41bf1c088e4234d138190fbf8c.cu
//Copyright (c) 2012, Mikhail Sirotenko <[email protected]> //All rights reserved. // //Redistribution and use in source and binary forms, with or without //modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND //ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED //WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE //DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY //DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES //(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND //ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT //(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS //SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <stdexcept> #include <cublas_v2.h> #include "../precomp.hpp" namespace cudacnn { template class FLayer<TensorGPU, float, TansigMod<float> >; template class FLayer<TensorGPU, float, Tansig<float> >; template class FLayer<TensorGPU, float, Purelin<float> >; template class FLayer<TensorGPU, double, TansigMod<double> >; template class FLayer<TensorGPU, double, Tansig<double> >; template class FLayer<TensorGPU, double, Purelin<double> >; #ifdef HAVE_CUDA template<class T> void ApplyWeightsTemplate(const cublasHandle_t& handle, const TensorGPU<T>& layer_input, const TensorGPU<T>& weights, const TensorGPU<T>& biases, TensorGPU<T>& out ); template<> void ApplyWeightsTemplate<float>(const cublasHandle_t& handle, const TensorGPU<float>& layer_input, const TensorGPU<float>& weights, const TensorGPU<float>& biases, TensorGPU<float>& out ) { //Copy biases to out because of sgemm() syntax uses C as a bias and the output cutilSafeCall(cudaMemcpy(out.data(),biases.data(),sizeof(float)*biases.num_elements(), cudaMemcpyDeviceToDevice)); //Flatten the input const float alpha = 1.0; const float beta = 1.0; cublasStatus_t ret = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, 1,weights.w(), layer_input.num_elements(), &alpha, layer_input.data(), layer_input.num_elements(), weights.data(), weights.w(), &beta, out.data(), out.h()); cublasCheckMsg(ret, "cublas Sgemm returned an error!\n"); } template<> void ApplyWeightsTemplate<double>(const cublasHandle_t& handle, const TensorGPU<double>& layer_input, const TensorGPU<double>& weights, const TensorGPU<double>& biases, TensorGPU<double>& out ) { //Copy biases to out because of sgemm() syntax uses C as a bias and the output cutilSafeCall(cudaMemcpy(out.data(),biases.data(),sizeof(double)*biases.num_elements(), cudaMemcpyDeviceToDevice)); //Flatten the input const double alpha = 1.0; const double beta = 1.0; cublasStatus_t ret = cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, 1,weights.w(), layer_input.num_elements(), &alpha, layer_input.data(), layer_input.num_elements(), weights.data(), weights.w(), &beta, out.data(), out.h()); cublasCheckMsg(ret, "cublas Sgemm returned an error!\n"); } template<class T, class TF> void FLayer<TensorGPU,T, 
TF>::Propagate(const TensorGPU<T>& layer_input ) { //Flatten the output of previous layer TensorGPU<T> flat_input(layer_input, true); flat_input.Flatten(); assert(flat_input.num_elements() == this->weights().h()); ApplyWeightsTemplate<T>(cublas_handle_, flat_input, this->weights(), this->biases(), this->out_); dim3 blocks(iDivUp(this->out().num_elements(),MAX_THREADS),1,1); dim3 threads(MAX_THREADS,1,1); ApplyTransferFunction<T,TF><<<blocks, threads>>>(this->out_, this->out_); } template <class T, int nthreads, bool hessian> __global__ void FLayerBackpropagateKernel(TensorDev<T> dedy, TensorDev<T> input, TensorDev<T> weights, TensorDev<T> de_dw, TensorDev<T> de_db, TensorDev<T> de_dx_prev) { //volatile __shared__ T smem[nthreads]; T* smem = SharedMemory<T>(); int tx = threadIdx.x; int by = blockIdx.y; int x = tx; int y = by; int tid = tx; smem[tid] = 0; if(x < dedy.num_elements()){ smem[tid] = hessian ? dedy[x]*Sqr(weights(x,y)) : dedy[x]*weights(x,y); //Gradients de_dw(x,y) = hessian ? de_dw(x,y) + dedy[x]*Sqr(input[y]): dedy[x]*input[y]; if(y == 0) de_db[x] = hessian ? de_db[x] + dedy[x] : dedy[x]; } volatile T* vsmem = smem; SmemReduce<T, nthreads>(vsmem, tid); __syncthreads(); //Copy to destination if(x==0) de_dx_prev[y] = smem[0]; __syncthreads(); } //Only compute derrivative without backpropagation template <class T, bool hessian> __global__ void FLayerComputeDerrivKernel(TensorDev<T> dedy, TensorDev<T> input, TensorDev<T> de_dw, TensorDev<T> de_db) { int tx = threadIdx.x; int by = blockIdx.y; int x = tx; int y = by; if(x < dedy.num_elements()){ //Gradients de_dw(x,y) = hessian ? de_dw(x,y) + dedy[x]*Sqr(input[y]): dedy[x]*input[y]; if(y == 0) de_db[x] = hessian ? de_db[x] + dedy[x] : dedy[x]; } } template <class T, class TF> template <bool hessian> inline void FLayer<TensorGPU,T, TF>::BackpropagateKernelProxy(const TensorGPU<T>& input, TensorGPU<T>& dedx_prev) { const TensorGPU<T>& de_dw_in = hessian ? this->d2e_dw2() : this->de_dw(); const TensorGPU<T>& de_db_in = hessian ? this->d2e_db2() : this->de_db(); const TensorGPU<T>& de_dx_in = hessian ? this->d2e_dx2() : this->de_dx(); dim3 threads(MAX_THREADS); dim3 blocks(iDivUp(de_dx_in.num_elements(), MAX_THREADS)); TensorGPU<T> dedy(de_dx_in); ApplyTransferFunctionDerriv<T, TF, hessian><<<blocks, threads>>>(this->out(), de_dx_in, dedy); //TODO: fix this. 
Weights width can be greather than 1024 int nthreads = iRoundUpPow2(this->weights().w()); threads = dim3(nthreads,1,1); blocks = dim3(1, this->weights().h(),1); size_t smem_size = std::max(nthreads*sizeof(T), 64*sizeof(T)); switch(threads.x) { case 1 : FLayerBackpropagateKernel<T, 1 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 2 : FLayerBackpropagateKernel<T,2 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 4 : FLayerBackpropagateKernel<T,4 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 8 : FLayerBackpropagateKernel<T,8 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 16 : FLayerBackpropagateKernel<T,16 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 32 : FLayerBackpropagateKernel<T,32 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 64 : FLayerBackpropagateKernel<T,64 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 128: FLayerBackpropagateKernel<T,128 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; case 256: FLayerBackpropagateKernel<T,256 , hessian><<<blocks, threads, smem_size>>>(dedy, input, this->weights(), de_dw_in, de_db_in, dedx_prev); break; default: throw std::runtime_error("Incorrect threads number in BackpropagateKernelProxy"); } cutilCheckMsg("Failed to apply transfer function in FLayer"); } template <class T, class TF> void FLayer<TensorGPU,T, TF>::BackPropagate(const TensorGPU<T>& input, TensorGPU<T>& dedx_prev) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); TensorGPU<T> flat_dedx_prev(dedx_prev, true); flat_dedx_prev.Flatten(); assert(flat_input.num_elements() == this->weights().h()); assert(de_dw_.HaveSameSize(this->weights())); assert(de_db_.HaveSameSize(this->biases())); //Require only the same number of elements, since last CLayer usually flattened assert(flat_dedx_prev.num_elements() == flat_input.num_elements()); //TODO: Remove the limitation assert(weights().w() <= MAX_THREADS); BackpropagateKernelProxy<false>(flat_input, flat_dedx_prev); } template <class T, class TF> void FLayer<TensorGPU,T, TF>::BackPropagateHessian(const TensorGPU<T>& input, TensorGPU<T>& d2edx2_prev) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); TensorGPU<T> flat_d2edx2_prev(d2edx2_prev, true); flat_d2edx2_prev.Flatten(); assert(flat_input.num_elements() == this->weights().h()); assert(this->d2e_dw2_.HaveSameSize(this->weights())); assert(this->d2e_db2_.HaveSameSize(this->biases())); assert(flat_d2edx2_prev.num_elements() == flat_input.num_elements()); //Assume this for now. 
If the weights matrix is bigger than use cublas method assert(this->weights().w() <= MAX_THREADS); BackpropagateKernelProxy<true>(flat_input, flat_d2edx2_prev); //cutilSafeCall(cudaThreadSynchronize()); this->num_hessian_accums_++; } template <class T, class TF> void FLayer<TensorGPU,T, TF>::AverageHessian() { if(this->num_hessian_accums_) { dim3 threads(min(512,this->d2e_dw2_.num_elements()),1,1); dim3 blocks(iDivUp(this->d2e_dw2_.num_elements(),512)); Average<T><<<blocks, threads>>>(this->d2e_dw2_, this->num_hessian_accums_); threads = dim3(min(512,this->d2e_db2_.num_elements()),1,1); blocks = dim3(iDivUp(this->d2e_db2_.num_elements(),512)); Average<T><<<blocks, threads>>>(this->d2e_db2_, this->num_hessian_accums_); this->num_hessian_accums_ = 0; } } template <class T, class TF> void FLayer<TensorGPU,T, TF>::AdaptWeights(T tau, bool use_hessian, T mu) { dim3 threads(MAX_THREADS); if(use_hessian){ dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS)); AdaptWeightsKernel<T><<<threads,blocks>>>(this->weights(), tau, mu, this->de_dw(), this->d2e_dw2()); blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS)); AdaptWeightsKernel<T><<<threads,blocks>>>(this->biases(), tau, mu, this->de_db(), this->d2e_db2()); }else{ dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS)); AdaptWeightsKernel<T><<<threads,blocks>>>(this->weights(), tau, this->de_dw()); blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS)); AdaptWeightsKernel<T><<<threads,blocks>>>(this->biases(), tau, this->de_db()); } } template <class T, class TF> template <bool hessian> void FLayer<TensorGPU, T, TF>::ComputeGradientKernelProxy(const TensorGPU<T>& input) { const TensorGPU<T>& de_dw_in = hessian ? this->d2e_dw2() : this->de_dw(); const TensorGPU<T>& de_db_in = hessian ? this->d2e_db2() : this->de_db(); const TensorGPU<T>& de_dx_in = hessian ? this->d2e_dx2() : this->de_dx(); dim3 threads(MAX_THREADS); dim3 blocks(iDivUp(de_dx_in.num_elements(), MAX_THREADS)); TensorGPU<T> dedy(de_dx_in); ApplyTransferFunctionDerriv<T, TF, hessian><<<blocks, threads>>>(this->out(), de_dx_in, dedy); //TODO: fix this. Weights width can be greather than 1024 threads = dim3(iRoundUpPow2(this->weights().w()),1,1); blocks = dim3(1, this->weights().h(),1); FLayerComputeDerrivKernel<T, hessian><<<blocks, threads>>>(dedy, input, de_dw_in, de_db_in); cutilCheckMsg("Failed to compute derrivative in FLayer"); } /* Compute gradient without backpropagating errors */ template <class T, class TF> void FLayer<TensorGPU, T, TF>::ComputeGradient(const TensorGPU<T>& input) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); ComputeGradientKernelProxy<false>(flat_input); } /* Compute Hessian without backpropagating errors */ template <class T, class TF> void FLayer<TensorGPU, T, TF>::ComputeHessian(const TensorGPU<T>& input) { //Flatten the output of previous layer TensorGPU<T> flat_input(input, true); flat_input.Flatten(); ComputeGradientKernelProxy<true>(flat_input); this->num_hessian_accums_++; } #endif //HAVE_CUDA }
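The switch over nthreads in BackpropagateKernelProxy above exists so that the shared-memory reduction inside FLayerBackpropagateKernel can be instantiated with a compile-time thread count. Detached from the layer code, the underlying idea looks like the sketch below; the function and template names are illustrative, not the library's SmemReduce.

// Minimal sketch of a block-wide tree reduction with a compile-time thread
// count; NTHREADS is assumed to be a power of two equal to blockDim.x.
template <typename T, int NTHREADS>
__device__ T BlockReduceSum(volatile T *smem, int tid)
{
    for (int stride = NTHREADS / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            smem[tid] += smem[tid + stride];   // fold the upper half onto the lower half
        __syncthreads();                       // every thread must reach this barrier
    }
    return smem[0];                            // all threads see the block-wide sum
}

A caller would dispatch on the runtime thread count exactly as the switch above does, e.g. case 64: someKernel<T, 64><<<blocks, threads, smem_size>>>(...);.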
4789803e7c9a40c873b9db0ddc40133da832a95f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ##################################################################### */ /* */ /* Notice: COPYRIGHT (C) GPU,GROUP. 2010 */ /* THIS PROGRAM IS PROVIDED UNDER THE TERMS OF GPU GROUP */ /* THE PROGRAM MAY ONLY */ /* BE USED IN A MANNER EXPLICITLY SPECIFIED IN THE GPU, */ /* WHICH INCLUDES LIMITATIONS ON COPYING, MODIFYING, */ /* REDISTRIBUTION AND WARANTIES. UNAUTHORIZED USE OF THIS */ /* PROGRAM IS SCTRICTLY PROHIBITED. */ /* ##################################################################### */ #include "../inc/const_defines.h" #include "intra_rc_chroma_kernel.cu" __global__ void iframe_residual_coding_chroam_kernel ( unsigned char * dev_input_uv, //unsigned char * dev_top_neighbor_uv, S_BLK_MB_INFO * dev_blk_mb_info, S_QP_DATA * dev_QpData_uv, short * Quant_tab_uv, short * Dquant_tab_uv, unsigned char * dev_recon_uv, short * dev_dct_coefs_uv, short * dev_dc_coefs_uv, int width_c, int height_c, int width_ref_c, int height_ref_c, int num_mb_hor, int num_mb_ver, int slice_num ) { __shared__ uchar4 s_top_neighbor_uv[8]; __shared__ uchar4 s_left_neighbor_uv[8]; __shared__ unsigned char s_in_uv[64*2]; __shared__ unsigned int Sad[8]; __shared__ short Quant_tab[16]; __shared__ short DQuant_tab[16]; short2 Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23; uchar4 Pred_Row[4],Rec_Row[4]; __shared__ short QcoefDc[8],TempDC[8]; int TopAvailable, LeftAvailable,pred_mode; S_BLK_MB_INFO BlkMBInfo; S_BLK_MB_INFO BlkMBInfo1; int Quant_Add; int Quant_Shift; int Dquant_Shift; int num_mbs = num_mb_hor*num_mb_ver; int input_index,top_index,rec_index,coef_index; int first_mb; int tid_x,tid_y; tid_x = threadIdx.x; tid_y = threadIdx.y; int tid = tid_x + tid_y * blockDim.x; Quant_tab[tid] = Quant_tab_uv[tid]; Quant_tab[tid+8] = Quant_tab_uv[tid+8]; DQuant_tab[tid] = Dquant_tab_uv[tid]; DQuant_tab[tid+8] = Dquant_tab_uv[tid+8]; Quant_Add = dev_QpData_uv->QuantAdd; Quant_Shift = dev_QpData_uv->QuantShift; Dquant_Shift = dev_QpData_uv->DQuantShift; first_mb = blockIdx.x*num_mb_hor* num_mb_ver/slice_num; // s_left_neighbor_uv[tid].x = 0; //left pix for uv componentfirst 4 threads for u ,the rest for v s_left_neighbor_uv[tid].y = 0; s_left_neighbor_uv[tid].z = 0; s_left_neighbor_uv[tid].w = 0; for(int j =0;j< num_mb_ver/slice_num;j++) { input_index = tid+j*width_c*MB_HEIGHT_C + blockIdx.x*MB_HEIGHT_C*width_c*(num_mb_ver/slice_num); rec_index = tid_x*BLK_WIDTH + (tid_y&1)*width_ref_c*BLK_HEIGHT + j*MB_WIDTH_C*width_ref_c + (tid_y>>1)*height_ref_c*width_ref_c + blockIdx.x*MB_HEIGHT_C*width_ref_c*(num_mb_ver/slice_num); coef_index = (tid&3)*16+j*num_mb_hor*MB_TOTAL_SIZE_C + (tid_y>>1)*MB_TOTAL_SIZE_C*num_mbs + first_mb*MB_TOTAL_SIZE_C; top_index = tid_x*BLK_WIDTH + (j*MB_HEIGHT_C-1)*width_ref_c + (tid_y>>1)*width_ref_c*height_ref_c + blockIdx.x*MB_HEIGHT_C*width_ref_c*(num_mb_ver/slice_num) ; for(int k =0;k< num_mb_hor;k++) { // for (int i=0;i<16;i++) { s_in_uv[tid+i*8]= dev_input_uv[input_index+(i>>3)*width_c*height_c+(i&7)*width_c]; } BlkMBInfo = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]; BlkMBInfo1 = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB + 8]; TopAvailable = (BlkMBInfo.Loc & LOC_MB_TOP_EDGE) ? 0 : 1; LeftAvailable = (BlkMBInfo.Loc & LOC_MB_LEFT_EDGE) ? 
0 : 1; if(TopAvailable) { s_top_neighbor_uv[tid].x = dev_recon_uv[top_index]; s_top_neighbor_uv[tid].y = dev_recon_uv[top_index+1]; s_top_neighbor_uv[tid].z = dev_recon_uv[top_index+2]; s_top_neighbor_uv[tid].w = dev_recon_uv[top_index+3]; } // IntraChromaPrediction_cu( s_top_neighbor_uv, s_left_neighbor_uv, s_in_uv+tid_x*4+tid_y*32, TopAvailable, LeftAvailable, Pred_Row, Sad, pred_mode, tid_x, tid_y ); BlkMBInfo.IntraChromaMode = pred_mode; BlkMBInfo1.IntraChromaMode = pred_mode; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]=BlkMBInfo; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB +8]=BlkMBInfo1; IntraChromaTransforms_cu( s_in_uv+tid_x*4+tid_y*32, Pred_Row, Rec_Row, Quant_Add, Quant_Shift, Dquant_Shift, Quant_tab, DQuant_tab, Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23, QcoefDc, TempDC, tid_x, tid_y ); for(int i=0;i<4;i++) { dev_recon_uv[rec_index+i*width_ref_c+0] = Rec_Row[i].x; dev_recon_uv[rec_index+i*width_ref_c+1] = Rec_Row[i].y; dev_recon_uv[rec_index+i*width_ref_c+2] = Rec_Row[i].z; dev_recon_uv[rec_index+i*width_ref_c+3] = Rec_Row[i].w; } dev_dct_coefs_uv[coef_index] = Qcoef_0_01.x; dev_dct_coefs_uv[coef_index+1] = Qcoef_0_01.y; dev_dct_coefs_uv[coef_index+2] = Qcoef_0_23.x; dev_dct_coefs_uv[coef_index+3] = Qcoef_0_23.y; dev_dct_coefs_uv[coef_index+4] = Qcoef_1_01.x; dev_dct_coefs_uv[coef_index+5] = Qcoef_1_01.y; dev_dct_coefs_uv[coef_index+6] = Qcoef_1_23.x; dev_dct_coefs_uv[coef_index+7] = Qcoef_1_23.y; dev_dct_coefs_uv[coef_index+8] = Qcoef_2_01.x; dev_dct_coefs_uv[coef_index+9] = Qcoef_2_01.y; dev_dct_coefs_uv[coef_index+10] = Qcoef_2_23.x; dev_dct_coefs_uv[coef_index+11] = Qcoef_2_23.y; dev_dct_coefs_uv[coef_index+12] = Qcoef_3_01.x; dev_dct_coefs_uv[coef_index+13] = Qcoef_3_01.y; dev_dct_coefs_uv[coef_index+14] = Qcoef_3_23.x; dev_dct_coefs_uv[coef_index+15] = Qcoef_3_23.y; dev_dc_coefs_uv[(tid%4) + k*BLOCKS_PER_MB_C + j*BLOCKS_PER_MB_C*num_mb_hor +(tid_y>>1)*BLOCKS_PER_MB_C*num_mbs + first_mb*BLOCKS_PER_MB_C] = QcoefDc[tid]; // write mbinfo for next mb row if(tid_x == 1) { s_left_neighbor_uv [tid-1].x = s_left_neighbor_uv [tid].x = Rec_Row[0].w; s_left_neighbor_uv [tid-1].y = s_left_neighbor_uv [tid].y = Rec_Row[1].w; s_left_neighbor_uv [tid-1].z = s_left_neighbor_uv [tid].z = Rec_Row[2].w; s_left_neighbor_uv [tid-1].w = s_left_neighbor_uv [tid].w = Rec_Row[3].w; } input_index += MB_WIDTH_C; top_index += MB_WIDTH_C; rec_index += MB_WIDTH_C; coef_index += MB_TOTAL_SIZE_C; } } } inline __device__ void Chroma_inter_HadamardTransformAndQuantize_kernel( short *TempDC, int &Quant_Add, int &Quant_Shift, short &Quant_tab0, short *QCoefDC, int &tid ) { short TempCoef; int QAdd; int Sign; int QuantDCShift; QAdd = Quant_Add * 2; QuantDCShift = Quant_Shift + 1; if((tid&3)==0) { TempCoef = TempDC[tid] + TempDC[1+tid] + TempDC[2+tid] + TempDC[3+tid]; } else if((tid&3)==1) { TempCoef = TempDC[0+(tid>>2)*4] - TempDC[1+(tid>>2)*4] + TempDC[2+(tid>>2)*4] - TempDC[3+(tid>>2)*4]; } else if((tid&3)==2) { TempCoef = TempDC[0+(tid>>2)*4] + TempDC[1+(tid>>2)*4] - TempDC[2+(tid>>2)*4] - TempDC[3+(tid>>2)*4]; } else { TempCoef = TempDC[0+(tid>>2)*4] - TempDC[1+(tid>>2)*4] - TempDC[2+(tid>>2)*4] + TempDC[3+(tid>>2)*4]; } Sign = (TempCoef >= 0) ? 1 : -1; TempCoef = (TempCoef >= 0) ? 
TempCoef : -TempCoef; TempCoef = min (((TempCoef * Quant_tab0 + QAdd) >> QuantDCShift), TempCoef); QCoefDC[tid] = Sign * TempCoef; } // inverse Hadamard transform inline __device__ void Chroma_inter_IHadamard2x2AndDQuant_kernel( short *TempDC, int &DQuant_Shift, short &DQuant_tab0, short *QCoefDC, int &tid ) { short TempCoef; QCoefDC[tid] = (TempDC[tid] * DQuant_tab0 << DQuant_Shift); if((tid&3)==0) { TempCoef = QCoefDC[tid] + QCoefDC[1+tid] + QCoefDC[2+tid] + QCoefDC[3+tid]; } else if((tid&3)==1) { TempCoef = QCoefDC[0+(tid>>2)*4] - QCoefDC[1+(tid>>2)*4] + QCoefDC[2+(tid>>2)*4] - QCoefDC[3+(tid>>2)*4]; } else if((tid&3)==2) { TempCoef = QCoefDC[0+(tid>>2)*4] + QCoefDC[1+(tid>>2)*4] - QCoefDC[2+(tid>>2)*4] - QCoefDC[3+(tid>>2)*4]; } else { TempCoef = QCoefDC[0+(tid>>2)*4] - QCoefDC[1+(tid>>2)*4] - QCoefDC[2+(tid>>2)*4] + QCoefDC[3+(tid>>2)*4]; } QCoefDC [tid] = (TempCoef>>1); } inline __device__ void Chroma_inter_TransformAndQuantize_kernel(short *Diff,short *Dc_coef,short *Quant_tables,int &QAdd,int &Quant_Shift,int &tx,int &ty,int &tz) { short Sum0,Sum1,Diff0,Diff1; short coef0,coef1,coef2,coef3; int tid_index; int sign; // vertical pass of the butterfly transform tid_index = tx+(ty&1)*8+(ty>>1)*64+tz*128; Sum0 = (Diff[tid_index] + Diff[tid_index+48]); Sum1 = (Diff[tid_index+16] + Diff[tid_index+32]); Diff0 = (Diff[tid_index] - Diff[tid_index+48]); Diff1 = (Diff[tid_index+16] - Diff[tid_index+32]); Diff[tid_index] = Sum0 + Sum1; Diff[tid_index+32] = Sum0 - Sum1; Diff[tid_index+16] = 2 * Diff0 + Diff1; Diff[tid_index+48] = Diff0 - 2 * Diff1; __syncthreads(); // sync, possibly unnecessary: the threads that must communicate for the horizontal butterfly pass are within one warp // organize threads so that four adjacent threads handle one 4x4 sub-block tid_index = ((tx&3)<<4)+((tx>>2)<<2)+((ty>>1)<<3)+((ty&1)<<6)+(tz<<7); // equivalent to tid_index = (tx&3)*16+(tx>>2)*4+(ty>>1)*8+(ty&1)*64+tz*128; Sum0 = (Diff[tid_index] + Diff[tid_index+3]); Sum1 = (Diff[tid_index+1] + Diff[tid_index+2]); Diff0 = (Diff[tid_index] - Diff[tid_index+3]); Diff1 = (Diff[tid_index+1] - Diff[tid_index+2]); coef0= Sum0 + Sum1; coef2 = Sum0 - Sum1; coef1 = 2 * Diff0 + Diff1; coef3 = Diff0 - 2 * Diff1; if((tx&3) == 0 ) // store the DC component; only threads 0 and 4 need to handle this { Dc_coef[(tx>>2)+(ty<<1)+(tz<<3)] = coef0; } // quantize, outputting per 8x8 block in blk-falt format (initially arranged in raster order) tid_index = tx*4+ty*32+tz*128; sign = (coef0 >= 0) ? 1 : -1; coef0 = (coef0 >= 0) ? coef0 : -coef0; Diff[tid_index] = sign * ((coef0 * Quant_tables[(tx&3)*4] + QAdd) >> Quant_Shift); sign = (coef1 >= 0) ? 1 : -1; coef1 = (coef1 >= 0) ? coef1 : -coef1; Diff[tid_index+1] = sign * ((coef1 * Quant_tables[(tx&3)*4+1] + QAdd) >> Quant_Shift); sign = (coef2 >= 0) ? 1 : -1; coef2 = (coef2 >= 0) ? coef2 : -coef2; Diff[tid_index+2] = sign * ((coef2 * Quant_tables[(tx&3)*4+2] + QAdd) >> Quant_Shift); sign = (coef3 >= 0) ? 1 : -1; coef3 = (coef3 >= 0) ?
coef3 : -coef3; Diff[tid_index+3] = sign * ((coef3 * Quant_tables[(tx&3)*4+3] + QAdd) >> Quant_Shift); } // inverse transform and dequantization for the chroma components inline __device__ void Chroma_inter_DQuantAndITransform_kernel(short *dct_coef,short * dc_coef,unsigned int *pred,short *DQuant_tables,int &DQuant_Shift,int &tx,int &ty,int &tz) { short Sum0,Sum1,Diff0,Diff1; short coef0,coef4,coef8,coef12; int tid_index; int tid_block = tx+ty*8+tz*32; // dequantize dct_coef[tid_block] = (dct_coef[tid_block] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; dct_coef[tid_block+64] = (dct_coef[tid_block+64] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; dct_coef[tid_block+128] = (dct_coef[tid_block+128] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; dct_coef[tid_block+192] = (dct_coef[tid_block+192] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; __syncthreads(); // replace the DC coefficients if(tid_block < 16) { dct_coef[tid_block*16] = dc_coef[tid_block]; } __syncthreads(); tid_index = tx*4+ty*32+tz*128; //tid_index = tx*4+ty*blockDim.x*4+tz*blockDim.x*blockDim.y*4; // horizontal butterfly pass Sum0 = dct_coef[tid_index] + dct_coef[tid_index+2]; Diff0 = dct_coef[tid_index] - dct_coef[tid_index+2]; Diff1 = (dct_coef[tid_index+1] >> 1) - dct_coef[tid_index+3]; Sum1 = dct_coef[tid_index+1] + (dct_coef[tid_index+3] >> 1); dct_coef[tid_index] = Sum0 + Sum1; dct_coef[tid_index+1] = Diff0 + Diff1; dct_coef[tid_index+2] = Diff0 - Diff1; dct_coef[tid_index+3] = Sum0 - Sum1; __syncthreads(); // vertical butterfly pass tid_index = (tx&3)+((tx>>2)<<4)+ty*32+tz*128; Sum0 = (dct_coef[tid_index] + dct_coef[tid_index+8]); Sum1 = dct_coef[tid_index+4] + (dct_coef[tid_index+12]>>1); Diff0 = (dct_coef[tid_index] - dct_coef[tid_index+8]); Diff1 = ((dct_coef[tid_index+4]>>1) - dct_coef[tid_index+12]); tid_index = tx + ((ty&1)<<6) + ((ty>>1)<<3) + (tz<<7); coef0 = (Sum0 + Sum1 + 32) >> 6; coef0 = coef0 + pred[tid_index]; coef4 = (Diff0 + Diff1 + 32) >> 6; coef4 = coef4 + pred[tid_index+16]; coef8 = (Diff0 - Diff1 + 32) >> 6; coef8 = coef8 + pred[tid_index+32]; coef12 = (Sum0 - Sum1 + 32) >> 6; coef12 = coef12 + pred[tid_index+48]; pred[tid_index] = (unsigned char)(coef0 < 0 ? 0 :((coef0 > 255) ? 255 : coef0)); pred[tid_index+16] = (unsigned char)(coef4 < 0 ? 0 :((coef4 > 255) ? 255 : coef4)); pred[tid_index+32] = (unsigned char)(coef8 < 0 ? 0 :((coef8 > 255) ? 255 : coef8)); pred[tid_index+48] = (unsigned char)(coef12 < 0 ? 0 :((coef12 > 255) ?
255 : coef12)); } __global__ void MotionCompensateChroma_kernel( unsigned char *dev_ref_uv, unsigned char *dev_pred_uv, S_BLK_MB_INFO *dev_blk_mb_info, int enc_width_c, int enc_height_c, int ref_width_c, int ref_height_c, int RefStride2BeginUV/*, int *dev_index, unsigned char *dev_pred_ref*/ ) { int tid_x = threadIdx.x; int tid_y = threadIdx.y; int tid_z = threadIdx.z; int tid_blk = tid_x + tid_y*blockDim.x + tid_z*blockDim.x*blockDim.y; /*__shared__ unsigned char ref_uv[9*9*2];*/ __shared__ unsigned int ref_uv[9*9*2]; __shared__ S_BLK_MB_INFO blk_mb_info[BLOCKS_PER_MB]; int index_blk_info; // position of the 2x2 block that each thread's pixel belongs to int mv_x,mv_y,ref_x,ref_y,FracY,FracX; int ref_offset,pre_offset; unsigned int left0,right0,left1,right1,result; int value0,value1,value2,value3; index_blk_info = (tid_x>>1) + (tid_y>>1)*4; pre_offset = tid_x + tid_y*enc_width_c + tid_z*enc_width_c*enc_height_c + blockIdx.x*MB_WIDTH_C+blockIdx.y*MB_HEIGHT_C*enc_width_c; // load the corresponding MB info for each block, if(tid_blk < 16) { blk_mb_info[tid_blk] = dev_blk_mb_info[tid_blk + blockIdx.x*BLOCKS_PER_MB+blockIdx.y*(enc_width_c>>3)*BLOCKS_PER_MB]; } __syncthreads(); mv_x = blk_mb_info[index_blk_info].MV.x; mv_y = blk_mb_info[index_blk_info].MV.y; ref_x = (mv_x>>3) + tid_x + blockIdx.x*8; ref_y = (mv_y>>3) + tid_y + blockIdx.y*8; FracY = (mv_y & 0x7); FracX = (mv_x & 0x7); ref_x = (ref_x < -MB_WIDTH_C ) ? -MB_WIDTH_C :((ref_x > (ref_width_c - 1)) ? (ref_width_c - 1) : ref_x); ref_y = (ref_y < -MB_HEIGHT_C ) ? -MB_HEIGHT_C :((ref_y > (ref_height_c - 1)) ? (ref_height_c - 1) : ref_y); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; // load the reference data needed for one 8x8 macroblock ref_uv[tid_x+tid_y*9+tid_z*9*9] = (unsigned int)dev_ref_uv[ref_offset]; // load the ninth pixel of each row, handled by the eighth column of threads if(tid_y==0) { ref_x = (ref_x + (blockDim.x-tid_x)); ref_y = (ref_y + tid_x); ref_x = (ref_x < -MB_WIDTH_C ) ? -MB_WIDTH_C :((ref_x > (ref_width_c - 1)) ? (ref_width_c - 1) : ref_x); ref_y = (ref_y < -MB_HEIGHT_C ) ? -MB_HEIGHT_C :((ref_y > (ref_height_c - 1)) ? (ref_height_c - 1) : ref_y); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; ref_uv[tid_x*9+tid_z*9*9+8] = (unsigned int)dev_ref_uv[ref_offset]; } if(tid_y == (blockDim.y - 1)) { ref_x = (ref_x < -MB_WIDTH_C ) ? -MB_WIDTH_C :((ref_x > (ref_width_c - 1)) ? (ref_width_c - 1) : ref_x); ref_y = ((ref_y+1) < -MB_HEIGHT_C ) ? -MB_HEIGHT_C :(((ref_y+1) > (ref_height_c - 1)) ? (ref_height_c - 1) : (ref_y+1)); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; ref_uv[tid_x + blockDim.y*9+tid_z*9*9] = (unsigned int)dev_ref_uv[ref_offset]; } if((tid_x==(blockDim.x-1))&&(tid_y==(blockDim.y-1))) { ref_x = (((ref_x+1) < -MB_WIDTH_C ) ? -MB_WIDTH_C :(((ref_x+1) > (ref_width_c - 1)) ?
(ref_width_c - 1) : (ref_x+1))); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; ref_uv[(tid_z+1)*9*9-1] = (unsigned int)dev_ref_uv[ref_offset]; } __syncthreads(); left0 = ref_uv[tid_x + tid_y*9+tid_z*81]; right0 = ref_uv[tid_x + tid_y*9+tid_z*81 + 1]; left1 = ref_uv[tid_x + (tid_y+1)*9+tid_z*81]; right1 = ref_uv[tid_x + (tid_y+1)*9+tid_z*81+1]; value0 = (8 - FracX) * (8 - FracY) * (int)(left0); value1 = ( FracX) * (8 - FracY) * (int)(right0); value2 = (8 - FracX) * ( FracY) * (int)(left1); value3 = ( FracX) * ( FracY) * (int)(right1); result = (unsigned char)((value0 + value1 + value2 + value3 + 32) >> 6); dev_pred_uv[pre_offset] = result; } // P-frame chroma inter residual coding __global__ void ChromaPFrameInterResidualCoding_kernel( unsigned char *dev_input_uv, unsigned char *dev_pred_uv, unsigned char *dev_recon_uv, short *dev_dct_coefs_uv, short *dev_dc_coefs_uv, short *dev_Quant_tables, short *dev_Dquant_tables, S_QP_DATA *dev_QpData, int enc_width_c, int enc_height_c, int ref_width_c, int ref_height_c, int num_mb_hor, int num_mb_ver ) { //dim3 threads(8,4,2); int tid_x = threadIdx.x; int tid_y = threadIdx.y; int tid_z = threadIdx.z; int tid_blk = tid_x+ (tid_y+tid_z*blockDim.y)*blockDim.x; int offset_input = blockIdx.x*2*MB_WIDTH_C+blockIdx.y*MB_HEIGHT_C*enc_width_c + tid_z*enc_width_c*enc_height_c; // start position of the macroblocks each Block begins processing; note one block processes two macroblocks int offset_output = blockIdx.x*2*MB_WIDTH_C+blockIdx.y*MB_HEIGHT_C*ref_width_c +tid_z*ref_width_c*ref_height_c; // start position in the reconstructed frame for each Block int tid_index = tid_z*MB_HEIGHT_C*MB_HEIGHT_C*2+tid_y*8+tid_x; // position of the pixel handled by each thread within the Block int out_dct_index,out_rec_index; // unsigned char src_input; __shared__ unsigned int pred_in[MB_TOTAL_SIZE_C*4]; __shared__ unsigned int src_in[MB_TOTAL_SIZE_C*4]; __shared__ short Diff[MB_TOTAL_SIZE_C*4]; __shared__ short Dc_coef[16]; __shared__ short Dc_coef_temp[16]; __shared__ short Quant_tables[16]; __shared__ short DQuant_tables[16]; if(tid_blk < 16) { Quant_tables[tid_blk] = dev_Quant_tables[tid_blk]; DQuant_tables[tid_blk] = dev_Dquant_tables[tid_blk]; } int Quant_Add = dev_QpData->QuantAdd; int Quant_Shift = dev_QpData->QuantShift; int Dquant_Shift = dev_QpData->DQuantShift; // lay the two macroblocks side by side (raster order: the first 16 pixels belong to two different macroblocks, giving a 16x8 block) for(int i=0;i<4;i++) { src_in[tid_index+i*32] = ( unsigned int)dev_input_uv[offset_input + tid_x+(tid_y&1)*8 + (tid_y>>1)*enc_width_c + i*2*enc_width_c]; pred_in[tid_index+i*32] = (unsigned int)dev_pred_uv[offset_input + tid_x+(tid_y&1)*8 + (tid_y>>1)*enc_width_c + i*2*enc_width_c]; Diff[tid_index+i*32] = (short)(src_in[tid_index+i*32] - pred_in[tid_index+i*32]); } __syncthreads(); // DCT and quantization of the chroma components Chroma_inter_TransformAndQuantize_kernel(Diff,Dc_coef_temp,Quant_tables,Quant_Add,Quant_Shift,tid_x,tid_y,tid_z); __syncthreads(); // Hadamard transform and inverse: four 2x2 block transforms in total, done by 16 threads if(tid_blk < 16) { int out_dc_index = (tid_blk&7)+blockIdx.x*2*BLOCKS_PER_MB_C+blockIdx.y*BLOCKS_PER_MB_C*num_mb_hor+tid_y*num_mb_hor*num_mb_ver*BLOCKS_PER_MB_C; Chroma_inter_HadamardTransformAndQuantize_kernel( Dc_coef_temp, Quant_Add, Quant_Shift, Quant_tables[0], Dc_coef, tid_blk ); dev_dc_coefs_uv[out_dc_index] = Dc_coef[tid_blk]; Chroma_inter_IHadamard2x2AndDQuant_kernel( Dc_coef, Dquant_Shift, DQuant_tables[0], Dc_coef_temp, tid_blk ); } __syncthreads(); out_dct_index = tid_x+tid_y*blockDim.x+blockIdx.x*2*MB_TOTAL_SIZE_C+blockIdx.y*num_mb_hor*MB_TOTAL_SIZE_C+tid_z*num_mb_hor*num_mb_ver*MB_TOTAL_SIZE_C; for(int i=0;i<4;i++) { dev_dct_coefs_uv[out_dct_index+i*32] = Diff[tid_x+tid_y*blockDim.x + tid_z*2*MB_TOTAL_SIZE_C + i*32]; } __syncthreads(); // inverse transform and dequantization Chroma_inter_DQuantAndITransform_kernel(Diff, Dc_coef_temp, pred_in, DQuant_tables, Dquant_Shift, tid_x, tid_y, tid_z);
out_rec_index = offset_output + tid_x+(tid_y&1)*8 + (tid_y>>1)*ref_width_c; for(int i = 0;i <4;i++) { dev_recon_uv[out_rec_index + i*2*ref_width_c] = pred_in[tid_x + tid_y*blockDim.x + tid_z*2*MB_TOTAL_SIZE_C + i*32]; } } __global__ void Chroma_PFrame_Intra_ResidualCoding_kernel( unsigned char *dev_input_uv, unsigned char *dev_recon_uv, S_BLK_MB_INFO *dev_blk_mb_info, short *dev_dct_coefs_uv, short *dev_dc_coefs_uv, short *dev_Quant_tables, short *dev_Dquant_tables, S_QP_DATA *dev_QpData, int enc_width_c, int enc_height_c, int ref_width_c, int ref_height_c, int num_mb_hor, int num_mb_ver, int slice_num ) { //dim3 threads(2,4,1); __shared__ uchar4 s_top_neighbor[8]; __shared__ uchar4 s_left_neighbor[8]; __shared__ unsigned char src_in[MB_TOTAL_SIZE_C*2]; __shared__ unsigned int Sad[8]; __shared__ short Quant_tab[16]; __shared__ short DQuant_tab[16]; short2 Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23; uchar4 Pred_Row[4],Rec_Row[4]; __shared__ short QcoefDc[8],TempDC[8]; int TopAvailable, LeftAvailable,pred_mode; S_BLK_MB_INFO BlkMBInfo; S_BLK_MB_INFO BlkMBInfo1; int Quant_Add; int Quant_Shift; int Dquant_Shift; int num_mbs = num_mb_hor*num_mb_ver; int rec_index,coef_index; int first_mb; int tid_x,tid_y; tid_x = threadIdx.x; tid_y = threadIdx.y; int tid = tid_x + tid_y * blockDim.x; Quant_tab[tid] = dev_Quant_tables[tid]; Quant_tab[tid+8] = dev_Quant_tables[tid+8]; DQuant_tab[tid] = dev_Dquant_tables[tid]; DQuant_tab[tid+8] = dev_Dquant_tables[tid+8]; Quant_Add = dev_QpData->QuantAdd; Quant_Shift = dev_QpData->QuantShift; Dquant_Shift = dev_QpData->DQuantShift; first_mb = blockIdx.x*num_mb_hor* num_mb_ver/slice_num; // for(int j =0;j< num_mb_ver/slice_num;j++) { //input_index = tid_x*BLK_WIDTH +(tid_y&1)*enc_width_c*BLK_HEIGHT + j*enc_width_c*MB_WIDTH_C + (tid_y>>1)*enc_width_c*enc_height_c; rec_index = tid_x*BLK_WIDTH + (tid_y&1)*ref_width_c*BLK_HEIGHT + j*MB_HEIGHT_C*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num); coef_index = ((tid&3)<<4)+j*num_mb_hor*MB_TOTAL_SIZE_C + (tid_y>>1)*enc_width_c*enc_height_c + first_mb*MB_TOTAL_SIZE_C; s_left_neighbor[tid].x = 0; s_left_neighbor[tid].y = 0; s_left_neighbor[tid].z = 0; s_left_neighbor[tid].w = 0; for(int k =0;k< num_mb_hor;k++) { BlkMBInfo = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]; BlkMBInfo1 = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB +8 + first_mb*BLOCKS_PER_MB]; if((BlkMBInfo.Type == INTRA_LARGE_BLOCKS_MB_TYPE) || (BlkMBInfo.Type == INTRA_SMALL_BLOCKS_MB_TYPE)) { // for (int i=0;i<16;i++) { src_in[tid+i*8]= dev_input_uv[tid+k*MB_WIDTH_C+j*enc_width_c*MB_WIDTH_C+(i>>3)*enc_width_c*enc_height_c+(i&7)*enc_width_c + blockIdx.x*MB_HEIGHT_C*enc_width_c*(num_mb_ver/slice_num)]; } if(j==0) { s_top_neighbor[tid].x = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_top_neighbor[tid].y = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 1]; s_top_neighbor[tid].z = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 2]; s_top_neighbor[tid].w = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 3]; } else { s_top_neighbor[tid].x = 
dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_top_neighbor[tid].y = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 1]; s_top_neighbor[tid].z = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 2]; s_top_neighbor[tid].w = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 3]; } TopAvailable = (BlkMBInfo.Loc & LOC_MB_TOP_EDGE) ? 0 : 1; LeftAvailable = (BlkMBInfo.Loc & LOC_MB_LEFT_EDGE) ? 0 : 1; // IntraChromaPrediction_cu( s_top_neighbor, s_left_neighbor, src_in+tid_x*4+tid_y*32, TopAvailable, LeftAvailable, Pred_Row, Sad, pred_mode, tid_x, tid_y ); BlkMBInfo.IntraChromaMode = pred_mode; BlkMBInfo1.IntraChromaMode = pred_mode; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]=BlkMBInfo; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB +8 + first_mb*BLOCKS_PER_MB]=BlkMBInfo1; IntraChromaTransforms_cu( src_in+tid_x*4+tid_y*32, Pred_Row, Rec_Row, Quant_Add, Quant_Shift, Dquant_Shift, Quant_tab, DQuant_tab, Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23, QcoefDc, TempDC, tid_x, tid_y ); for(int i=0;i<4;i++) { dev_recon_uv[rec_index+i*ref_width_c+0] = Rec_Row[i].x; dev_recon_uv[rec_index+i*ref_width_c+1] = Rec_Row[i].y; dev_recon_uv[rec_index+i*ref_width_c+2] = Rec_Row[i].z; dev_recon_uv[rec_index+i*ref_width_c+3] = Rec_Row[i].w; } dev_dct_coefs_uv[coef_index] = Qcoef_0_01.x; dev_dct_coefs_uv[coef_index+1] = Qcoef_0_01.y; dev_dct_coefs_uv[coef_index+2] = Qcoef_0_23.x; dev_dct_coefs_uv[coef_index+3] = Qcoef_0_23.y; dev_dct_coefs_uv[coef_index+4] = Qcoef_1_01.x; dev_dct_coefs_uv[coef_index+5] = Qcoef_1_01.y; dev_dct_coefs_uv[coef_index+6] = Qcoef_1_23.x; dev_dct_coefs_uv[coef_index+7] = Qcoef_1_23.y; dev_dct_coefs_uv[coef_index+8] = Qcoef_2_01.x; dev_dct_coefs_uv[coef_index+9] = Qcoef_2_01.y; dev_dct_coefs_uv[coef_index+10] = Qcoef_2_23.x; dev_dct_coefs_uv[coef_index+11] = Qcoef_2_23.y; dev_dct_coefs_uv[coef_index+12] = Qcoef_3_01.x; dev_dct_coefs_uv[coef_index+13] = Qcoef_3_01.y; dev_dct_coefs_uv[coef_index+14] = Qcoef_3_23.x; dev_dct_coefs_uv[coef_index+15] = Qcoef_3_23.y; dev_dc_coefs_uv[(tid&3) + k*BLOCKS_PER_MB_C + j*BLOCKS_PER_MB_C*num_mb_hor +(tid_y>>1)*BLOCKS_PER_MB_C*num_mbs + first_mb*BLOCKS_PER_MB_C] = QcoefDc[tid]; if(tid_x == 1) { s_left_neighbor [tid-1].x = s_left_neighbor [tid].x = Rec_Row[0].w; s_left_neighbor [tid-1].y = s_left_neighbor [tid].y = Rec_Row[1].w; s_left_neighbor [tid-1].z = s_left_neighbor [tid].z = Rec_Row[2].w; s_left_neighbor [tid-1].w = s_left_neighbor [tid].w = Rec_Row[3].w; } } else { s_left_neighbor [tid].x = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_left_neighbor [tid].y = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + ref_width_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_left_neighbor [tid].z = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + 
(tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + 2*ref_width_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_left_neighbor [tid].w = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + 3*ref_width_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; } rec_index += MB_WIDTH_C; coef_index += MB_TOTAL_SIZE_C; } } }
4789803e7c9a40c873b9db0ddc40133da832a95f.cu
/* ##################################################################### */ /* */ /* Notice: COPYRIGHT (C) GPU,GROUP. 2010 */ /* THIS PROGRAM IS PROVIDED UNDER THE TERMS OF GPU GROUP */ /* THE PROGRAM MAY ONLY */ /* BE USED IN A MANNER EXPLICITLY SPECIFIED IN THE GPU, */ /* WHICH INCLUDES LIMITATIONS ON COPYING, MODIFYING, */ /* REDISTRIBUTION AND WARANTIES. UNAUTHORIZED USE OF THIS */ /* PROGRAM IS SCTRICTLY PROHIBITED. */ /* ##################################################################### */ #include "../inc/const_defines.h" #include "intra_rc_chroma_kernel.cu" __global__ void iframe_residual_coding_chroam_kernel ( unsigned char * dev_input_uv, //unsigned char * dev_top_neighbor_uv, S_BLK_MB_INFO * dev_blk_mb_info, S_QP_DATA * dev_QpData_uv, short * Quant_tab_uv, short * Dquant_tab_uv, unsigned char * dev_recon_uv, short * dev_dct_coefs_uv, short * dev_dc_coefs_uv, int width_c, int height_c, int width_ref_c, int height_ref_c, int num_mb_hor, int num_mb_ver, int slice_num ) { __shared__ uchar4 s_top_neighbor_uv[8]; __shared__ uchar4 s_left_neighbor_uv[8]; __shared__ unsigned char s_in_uv[64*2]; __shared__ unsigned int Sad[8]; __shared__ short Quant_tab[16]; __shared__ short DQuant_tab[16]; short2 Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23; uchar4 Pred_Row[4],Rec_Row[4]; __shared__ short QcoefDc[8],TempDC[8]; int TopAvailable, LeftAvailable,pred_mode; S_BLK_MB_INFO BlkMBInfo; S_BLK_MB_INFO BlkMBInfo1; int Quant_Add; int Quant_Shift; int Dquant_Shift; int num_mbs = num_mb_hor*num_mb_ver; int input_index,top_index,rec_index,coef_index; int first_mb; int tid_x,tid_y; tid_x = threadIdx.x; tid_y = threadIdx.y; int tid = tid_x + tid_y * blockDim.x; Quant_tab[tid] = Quant_tab_uv[tid]; Quant_tab[tid+8] = Quant_tab_uv[tid+8]; DQuant_tab[tid] = Dquant_tab_uv[tid]; DQuant_tab[tid+8] = Dquant_tab_uv[tid+8]; Quant_Add = dev_QpData_uv->QuantAdd; Quant_Shift = dev_QpData_uv->QuantShift; Dquant_Shift = dev_QpData_uv->DQuantShift; first_mb = blockIdx.x*num_mb_hor* num_mb_ver/slice_num; //将全局存储器中的数据加载到共享存储器 s_left_neighbor_uv[tid].x = 0; //left pix for uv component,first 4 threads for u ,the rest for v s_left_neighbor_uv[tid].y = 0; s_left_neighbor_uv[tid].z = 0; s_left_neighbor_uv[tid].w = 0; for(int j =0;j< num_mb_ver/slice_num;j++) { input_index = tid+j*width_c*MB_HEIGHT_C + blockIdx.x*MB_HEIGHT_C*width_c*(num_mb_ver/slice_num); rec_index = tid_x*BLK_WIDTH + (tid_y&1)*width_ref_c*BLK_HEIGHT + j*MB_WIDTH_C*width_ref_c + (tid_y>>1)*height_ref_c*width_ref_c + blockIdx.x*MB_HEIGHT_C*width_ref_c*(num_mb_ver/slice_num); coef_index = (tid&3)*16+j*num_mb_hor*MB_TOTAL_SIZE_C + (tid_y>>1)*MB_TOTAL_SIZE_C*num_mbs + first_mb*MB_TOTAL_SIZE_C; top_index = tid_x*BLK_WIDTH + (j*MB_HEIGHT_C-1)*width_ref_c + (tid_y>>1)*width_ref_c*height_ref_c + blockIdx.x*MB_HEIGHT_C*width_ref_c*(num_mb_ver/slice_num) ; for(int k =0;k< num_mb_hor;k++) { //将一个宏块的原始数据以光栅的形式导入共享存储器 for (int i=0;i<16;i++) { s_in_uv[tid+i*8]= dev_input_uv[input_index+(i>>3)*width_c*height_c+(i&7)*width_c]; } BlkMBInfo = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]; BlkMBInfo1 = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB + 8]; TopAvailable = (BlkMBInfo.Loc & LOC_MB_TOP_EDGE) ? 0 : 1; LeftAvailable = (BlkMBInfo.Loc & LOC_MB_LEFT_EDGE) ? 
0 : 1; if(TopAvailable) { s_top_neighbor_uv[tid].x = dev_recon_uv[top_index]; s_top_neighbor_uv[tid].y = dev_recon_uv[top_index+1]; s_top_neighbor_uv[tid].z = dev_recon_uv[top_index+2]; s_top_neighbor_uv[tid].w = dev_recon_uv[top_index+3]; } // 帧内预测 IntraChromaPrediction_cu( s_top_neighbor_uv, s_left_neighbor_uv, s_in_uv+tid_x*4+tid_y*32, TopAvailable, LeftAvailable, Pred_Row, Sad, pred_mode, tid_x, tid_y ); BlkMBInfo.IntraChromaMode = pred_mode; BlkMBInfo1.IntraChromaMode = pred_mode; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]=BlkMBInfo; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB +8]=BlkMBInfo1; IntraChromaTransforms_cu( s_in_uv+tid_x*4+tid_y*32, Pred_Row, Rec_Row, Quant_Add, Quant_Shift, Dquant_Shift, Quant_tab, DQuant_tab, Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23, QcoefDc, TempDC, tid_x, tid_y ); for(int i=0;i<4;i++) { dev_recon_uv[rec_index+i*width_ref_c+0] = Rec_Row[i].x; dev_recon_uv[rec_index+i*width_ref_c+1] = Rec_Row[i].y; dev_recon_uv[rec_index+i*width_ref_c+2] = Rec_Row[i].z; dev_recon_uv[rec_index+i*width_ref_c+3] = Rec_Row[i].w; } dev_dct_coefs_uv[coef_index] = Qcoef_0_01.x; dev_dct_coefs_uv[coef_index+1] = Qcoef_0_01.y; dev_dct_coefs_uv[coef_index+2] = Qcoef_0_23.x; dev_dct_coefs_uv[coef_index+3] = Qcoef_0_23.y; dev_dct_coefs_uv[coef_index+4] = Qcoef_1_01.x; dev_dct_coefs_uv[coef_index+5] = Qcoef_1_01.y; dev_dct_coefs_uv[coef_index+6] = Qcoef_1_23.x; dev_dct_coefs_uv[coef_index+7] = Qcoef_1_23.y; dev_dct_coefs_uv[coef_index+8] = Qcoef_2_01.x; dev_dct_coefs_uv[coef_index+9] = Qcoef_2_01.y; dev_dct_coefs_uv[coef_index+10] = Qcoef_2_23.x; dev_dct_coefs_uv[coef_index+11] = Qcoef_2_23.y; dev_dct_coefs_uv[coef_index+12] = Qcoef_3_01.x; dev_dct_coefs_uv[coef_index+13] = Qcoef_3_01.y; dev_dct_coefs_uv[coef_index+14] = Qcoef_3_23.x; dev_dct_coefs_uv[coef_index+15] = Qcoef_3_23.y; dev_dc_coefs_uv[(tid%4) + k*BLOCKS_PER_MB_C + j*BLOCKS_PER_MB_C*num_mb_hor +(tid_y>>1)*BLOCKS_PER_MB_C*num_mbs + first_mb*BLOCKS_PER_MB_C] = QcoefDc[tid]; // write mbinfo for next mb row if(tid_x == 1) { s_left_neighbor_uv [tid-1].x = s_left_neighbor_uv [tid].x = Rec_Row[0].w; s_left_neighbor_uv [tid-1].y = s_left_neighbor_uv [tid].y = Rec_Row[1].w; s_left_neighbor_uv [tid-1].z = s_left_neighbor_uv [tid].z = Rec_Row[2].w; s_left_neighbor_uv [tid-1].w = s_left_neighbor_uv [tid].w = Rec_Row[3].w; } input_index += MB_WIDTH_C; top_index += MB_WIDTH_C; rec_index += MB_WIDTH_C; coef_index += MB_TOTAL_SIZE_C; } } } inline __device__ void Chroma_inter_HadamardTransformAndQuantize_kernel( short *TempDC, int &Quant_Add, int &Quant_Shift, short &Quant_tab0, short *QCoefDC, int &tid ) { short TempCoef; int QAdd; int Sign; int QuantDCShift; QAdd = Quant_Add * 2; QuantDCShift = Quant_Shift + 1; if((tid&3)==0) { TempCoef = TempDC[tid] + TempDC[1+tid] + TempDC[2+tid] + TempDC[3+tid]; } else if((tid&3)==1) { TempCoef = TempDC[0+(tid>>2)*4] - TempDC[1+(tid>>2)*4] + TempDC[2+(tid>>2)*4] - TempDC[3+(tid>>2)*4]; } else if((tid&3)==2) { TempCoef = TempDC[0+(tid>>2)*4] + TempDC[1+(tid>>2)*4] - TempDC[2+(tid>>2)*4] - TempDC[3+(tid>>2)*4]; } else { TempCoef = TempDC[0+(tid>>2)*4] - TempDC[1+(tid>>2)*4] - TempDC[2+(tid>>2)*4] + TempDC[3+(tid>>2)*4]; } Sign = (TempCoef >= 0) ? 1 : -1; TempCoef = (TempCoef >= 0) ? 
TempCoef : -TempCoef; TempCoef = min (((TempCoef * Quant_tab0 + QAdd) >> QuantDCShift), TempCoef); QCoefDC[tid] = Sign * TempCoef; } //哈达码变换的逆变换 inline __device__ void Chroma_inter_IHadamard2x2AndDQuant_kernel( short *TempDC, int &DQuant_Shift, short &DQuant_tab0, short *QCoefDC, int &tid ) { short TempCoef; QCoefDC[tid] = (TempDC[tid] * DQuant_tab0 << DQuant_Shift); if((tid&3)==0) { TempCoef = QCoefDC[tid] + QCoefDC[1+tid] + QCoefDC[2+tid] + QCoefDC[3+tid]; } else if((tid&3)==1) { TempCoef = QCoefDC[0+(tid>>2)*4] - QCoefDC[1+(tid>>2)*4] + QCoefDC[2+(tid>>2)*4] - QCoefDC[3+(tid>>2)*4]; } else if((tid&3)==2) { TempCoef = QCoefDC[0+(tid>>2)*4] + QCoefDC[1+(tid>>2)*4] - QCoefDC[2+(tid>>2)*4] - QCoefDC[3+(tid>>2)*4]; } else { TempCoef = QCoefDC[0+(tid>>2)*4] - QCoefDC[1+(tid>>2)*4] - QCoefDC[2+(tid>>2)*4] + QCoefDC[3+(tid>>2)*4]; } QCoefDC [tid] = (TempCoef>>1); } inline __device__ void Chroma_inter_TransformAndQuantize_kernel(short *Diff,short *Dc_coef,short *Quant_tables,int &QAdd,int &Quant_Shift,int &tx,int &ty,int &tz) { short Sum0,Sum1,Diff0,Diff1; short coef0,coef1,coef2,coef3; int tid_index; int sign; //碟形算法垂直部分 tid_index = tx+(ty&1)*8+(ty>>1)*64+tz*128; Sum0 = (Diff[tid_index] + Diff[tid_index+48]); Sum1 = (Diff[tid_index+16] + Diff[tid_index+32]); Diff0 = (Diff[tid_index] - Diff[tid_index+48]); Diff1 = (Diff[tid_index+16] - Diff[tid_index+32]); Diff[tid_index] = Sum0 + Sum1; Diff[tid_index+32] = Sum0 - Sum1; Diff[tid_index+16] = 2 * Diff0 + Diff1; Diff[tid_index+48] = Diff0 - 2 * Diff1; __syncthreads(); //同步,也许不需要,因为做水平碟形算法需要通信的线程在一个warp中 //按照相邻的4个线程处理一个4*4子宏块的方式组织线程 tid_index = ((tx&3)<<4)+((tx>>2)<<2)+((ty>>1)<<3)+((ty&1)<<6)+(tz<<7); //等效于tid_index = (tx&3)*16+(tx>>2)*4+(ty>>1)*8+(ty&1)*64+tz*128; Sum0 = (Diff[tid_index] + Diff[tid_index+3]); Sum1 = (Diff[tid_index+1] + Diff[tid_index+2]); Diff0 = (Diff[tid_index] - Diff[tid_index+3]); Diff1 = (Diff[tid_index+1] - Diff[tid_index+2]); coef0= Sum0 + Sum1; coef2 = Sum0 - Sum1; coef1 = 2 * Diff0 + Diff1; coef3 = Diff0 - 2 * Diff1; if((tx&3) == 0 ) //保存直流分量,只在0和4号线程处需要处理 { Dc_coef[(tx>>2)+(ty<<1)+(tz<<3)] = coef0; } //量化,按照8x8块的方式以blk-falt的格式输出(开始时是以光删形式排列的) tid_index = tx*4+ty*32+tz*128; sign = (coef0 >= 0) ? 1 : -1; coef0 = (coef0 >= 0) ? coef0 : -coef0; Diff[tid_index] = sign * ((coef0 * Quant_tables[(tx&3)*4] + QAdd) >> Quant_Shift); sign = (coef1 >= 0) ? 1 : -1; coef1 = (coef1 >= 0) ? coef1 : -coef1; Diff[tid_index+1] = sign * ((coef1 * Quant_tables[(tx&3)*4+1] + QAdd) >> Quant_Shift); sign = (coef2 >= 0) ? 1 : -1; coef2 = (coef2 >= 0) ? coef2 : -coef2; Diff[tid_index+2] = sign * ((coef2 * Quant_tables[(tx&3)*4+2] + QAdd) >> Quant_Shift); sign = (coef3 >= 0) ? 1 : -1; coef3 = (coef3 >= 0) ? 
coef3 : -coef3; Diff[tid_index+3] = sign * ((coef3 * Quant_tables[(tx&3)*4+3] + QAdd) >> Quant_Shift); } //色度分量的逆变换和反量化 inline __device__ void Chroma_inter_DQuantAndITransform_kernel(short *dct_coef,short * dc_coef,unsigned int *pred,short *DQuant_tables,int &DQuant_Shift,int &tx,int &ty,int &tz) { short Sum0,Sum1,Diff0,Diff1; short coef0,coef4,coef8,coef12; int tid_index; int tid_block = tx+ty*8+tz*32; //反量化 dct_coef[tid_block] = (dct_coef[tid_block] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; dct_coef[tid_block+64] = (dct_coef[tid_block+64] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; dct_coef[tid_block+128] = (dct_coef[tid_block+128] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; dct_coef[tid_block+192] = (dct_coef[tid_block+192] * DQuant_tables[tx+(ty&1)*8]) << DQuant_Shift; __syncthreads(); //替换直流系数 if(tid_block < 16) { dct_coef[tid_block*16] = dc_coef[tid_block]; } __syncthreads(); tid_index = tx*4+ty*32+tz*128; //tid_index = tx*4+ty*blockDim.x*4+tz*blockDim.x*blockDim.y*4; //横向碟形算法 Sum0 = dct_coef[tid_index] + dct_coef[tid_index+2]; Diff0 = dct_coef[tid_index] - dct_coef[tid_index+2]; Diff1 = (dct_coef[tid_index+1] >> 1) - dct_coef[tid_index+3]; Sum1 = dct_coef[tid_index+1] + (dct_coef[tid_index+3] >> 1); dct_coef[tid_index] = Sum0 + Sum1; dct_coef[tid_index+1] = Diff0 + Diff1; dct_coef[tid_index+2] = Diff0 - Diff1; dct_coef[tid_index+3] = Sum0 - Sum1; __syncthreads(); //垂直碟形算法 tid_index = (tx&3)+((tx>>2)<<4)+ty*32+tz*128; Sum0 = (dct_coef[tid_index] + dct_coef[tid_index+8]); Sum1 = dct_coef[tid_index+4] + (dct_coef[tid_index+12]>>1); Diff0 = (dct_coef[tid_index] - dct_coef[tid_index+8]); Diff1 = ((dct_coef[tid_index+4]>>1) - dct_coef[tid_index+12]); tid_index = tx + ((ty&1)<<6) + ((ty>>1)<<3) + (tz<<7); coef0 = (Sum0 + Sum1 + 32) >> 6; coef0 = coef0 + pred[tid_index]; coef4 = (Diff0 + Diff1 + 32) >> 6; coef4 = coef4 + pred[tid_index+16]; coef8 = (Diff0 - Diff1 + 32) >> 6; coef8 = coef8 + pred[tid_index+32]; coef12 = (Sum0 - Sum1 + 32) >> 6; coef12 = coef12 + pred[tid_index+48]; pred[tid_index] = (unsigned char)(coef0 < 0 ? 0 :((coef0 > 255) ? 255 : coef0)); pred[tid_index+16] = (unsigned char)(coef4 < 0 ? 0 :((coef4 > 255) ? 255 : coef4)); pred[tid_index+32] = (unsigned char)(coef8 < 0 ? 0 :((coef8 > 255) ? 255 : coef8)); pred[tid_index+48] = (unsigned char)(coef12 < 0 ? 0 :((coef12 > 255) ? 
255 : coef12)); } __global__ void MotionCompensateChroma_kernel( unsigned char *dev_ref_uv, unsigned char *dev_pred_uv, S_BLK_MB_INFO *dev_blk_mb_info, int enc_width_c, int enc_height_c, int ref_width_c, int ref_height_c, int RefStride2BeginUV/*, int *dev_index, unsigned char *dev_pred_ref*/ ) { int tid_x = threadIdx.x; int tid_y = threadIdx.y; int tid_z = threadIdx.z; int tid_blk = tid_x + tid_y*blockDim.x + tid_z*blockDim.x*blockDim.y; /*__shared__ unsigned char ref_uv[9*9*2];*/ __shared__ unsigned int ref_uv[9*9*2]; __shared__ S_BLK_MB_INFO blk_mb_info[BLOCKS_PER_MB]; int index_blk_info; //每个线程计算的像素所属的2*2块的位置 int mv_x,mv_y,ref_x,ref_y,FracY,FracX; int ref_offset,pre_offset; unsigned int left0,right0,left1,right1,result; int value0,value1,value2,value3; index_blk_info = (tid_x>>1) + (tid_y>>1)*4; pre_offset = tid_x + tid_y*enc_width_c + tid_z*enc_width_c*enc_height_c + blockIdx.x*MB_WIDTH_C+blockIdx.y*MB_HEIGHT_C*enc_width_c; //为每一个block加载对应的MB info, if(tid_blk < 16) { blk_mb_info[tid_blk] = dev_blk_mb_info[tid_blk + blockIdx.x*BLOCKS_PER_MB+blockIdx.y*(enc_width_c>>3)*BLOCKS_PER_MB]; } __syncthreads(); mv_x = blk_mb_info[index_blk_info].MV.x; mv_y = blk_mb_info[index_blk_info].MV.y; ref_x = (mv_x>>3) + tid_x + blockIdx.x*8; ref_y = (mv_y>>3) + tid_y + blockIdx.y*8; FracY = (mv_y & 0x7); FracX = (mv_x & 0x7); ref_x = (ref_x < -MB_WIDTH_C ) ? -MB_WIDTH_C :((ref_x > (ref_width_c - 1)) ? (ref_width_c - 1) : ref_x); ref_y = (ref_y < -MB_HEIGHT_C ) ? -MB_HEIGHT_C :((ref_y > (ref_height_c - 1)) ? (ref_height_c - 1) : ref_y); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; //加载一个8*8宏块需要的参考数据 ref_uv[tid_x+tid_y*9+tid_z*9*9] = (unsigned int)dev_ref_uv[ref_offset]; //加载每一行的第九个像素,由第8列线程加载 if(tid_y==0) { ref_x = (ref_x + (blockDim.x-tid_x)); ref_y = (ref_y + tid_x); ref_x = (ref_x < -MB_WIDTH_C ) ? -MB_WIDTH_C :((ref_x > (ref_width_c - 1)) ? (ref_width_c - 1) : ref_x); ref_y = (ref_y < -MB_HEIGHT_C ) ? -MB_HEIGHT_C :((ref_y > (ref_height_c - 1)) ? (ref_height_c - 1) : ref_y); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; ref_uv[tid_x*9+tid_z*9*9+8] = (unsigned int)dev_ref_uv[ref_offset]; } if(tid_y == (blockDim.y - 1)) { ref_x = (ref_x < -MB_WIDTH_C ) ? -MB_WIDTH_C :((ref_x > (ref_width_c - 1)) ? (ref_width_c - 1) : ref_x); ref_y = ((ref_y+1) < -MB_HEIGHT_C ) ? -MB_HEIGHT_C :(((ref_y+1) > (ref_height_c - 1)) ? (ref_height_c - 1) : (ref_y+1)); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; ref_uv[tid_x + blockDim.y*9+tid_z*9*9] = (unsigned int)dev_ref_uv[ref_offset]; } if((tid_x==(blockDim.x-1))&&(tid_y==(blockDim.y-1))) { ref_x = (((ref_x+1) < -MB_WIDTH_C ) ? -MB_WIDTH_C :(((ref_x+1) > (ref_width_c - 1)) ? 
(ref_width_c - 1) : (ref_x+1))); ref_offset = ref_x + ref_y*ref_width_c + RefStride2BeginUV+tid_z*ref_width_c*ref_height_c; ref_uv[(tid_z+1)*9*9-1] = (unsigned int)dev_ref_uv[ref_offset]; } __syncthreads(); left0 = ref_uv[tid_x + tid_y*9+tid_z*81]; right0 = ref_uv[tid_x + tid_y*9+tid_z*81 + 1]; left1 = ref_uv[tid_x + (tid_y+1)*9+tid_z*81]; right1 = ref_uv[tid_x + (tid_y+1)*9+tid_z*81+1]; value0 = (8 - FracX) * (8 - FracY) * (int)(left0); value1 = ( FracX) * (8 - FracY) * (int)(right0); value2 = (8 - FracX) * ( FracY) * (int)(left1); value3 = ( FracX) * ( FracY) * (int)(right1); result = (unsigned char)((value0 + value1 + value2 + value3 + 32) >> 6); dev_pred_uv[pre_offset] = result; } //P帧色度分量帧间编码 __global__ void ChromaPFrameInterResidualCoding_kernel( unsigned char *dev_input_uv, unsigned char *dev_pred_uv, unsigned char *dev_recon_uv, short *dev_dct_coefs_uv, short *dev_dc_coefs_uv, short *dev_Quant_tables, short *dev_Dquant_tables, S_QP_DATA *dev_QpData, int enc_width_c, int enc_height_c, int ref_width_c, int ref_height_c, int num_mb_hor, int num_mb_ver ) { //dim3 threads(8,4,2); int tid_x = threadIdx.x; int tid_y = threadIdx.y; int tid_z = threadIdx.z; int tid_blk = tid_x+ (tid_y+tid_z*blockDim.y)*blockDim.x; int offset_input = blockIdx.x*2*MB_WIDTH_C+blockIdx.y*MB_HEIGHT_C*enc_width_c + tid_z*enc_width_c*enc_height_c; // 每一个Block开始处理的宏块的起始位置,注意一个block处理两个宏块 int offset_output = blockIdx.x*2*MB_WIDTH_C+blockIdx.y*MB_HEIGHT_C*ref_width_c +tid_z*ref_width_c*ref_height_c; //每一个Block对应的重建帧的起始位置 int tid_index = tid_z*MB_HEIGHT_C*MB_HEIGHT_C*2+tid_y*8+tid_x; //Block内每一个线程对应像素的位置, int out_dct_index,out_rec_index; // unsigned char src_input; __shared__ unsigned int pred_in[MB_TOTAL_SIZE_C*4]; __shared__ unsigned int src_in[MB_TOTAL_SIZE_C*4]; __shared__ short Diff[MB_TOTAL_SIZE_C*4]; __shared__ short Dc_coef[16]; __shared__ short Dc_coef_temp[16]; __shared__ short Quant_tables[16]; __shared__ short DQuant_tables[16]; if(tid_blk < 16) { Quant_tables[tid_blk] = dev_Quant_tables[tid_blk]; DQuant_tables[tid_blk] = dev_Dquant_tables[tid_blk]; } int Quant_Add = dev_QpData->QuantAdd; int Quant_Shift = dev_QpData->QuantShift; int Dquant_Shift = dev_QpData->DQuantShift; //按照两个宏块并排的形式(光栅形式排列,前16个像素分别属于不同的两个宏块,得到16*8的块大小) for(int i=0;i<4;i++) { src_in[tid_index+i*32] = ( unsigned int)dev_input_uv[offset_input + tid_x+(tid_y&1)*8 + (tid_y>>1)*enc_width_c + i*2*enc_width_c]; pred_in[tid_index+i*32] = (unsigned int)dev_pred_uv[offset_input + tid_x+(tid_y&1)*8 + (tid_y>>1)*enc_width_c + i*2*enc_width_c]; Diff[tid_index+i*32] = (short)(src_in[tid_index+i*32] - pred_in[tid_index+i*32]); } __syncthreads(); //色度分量的DCT变换和量化 Chroma_inter_TransformAndQuantize_kernel(Diff,Dc_coef_temp,Quant_tables,Quant_Add,Quant_Shift,tid_x,tid_y,tid_z); __syncthreads(); //哈达码变换和逆变换,共4个2*2块的变换,由16个线程完成 if(tid_blk < 16) { int out_dc_index = (tid_blk&7)+blockIdx.x*2*BLOCKS_PER_MB_C+blockIdx.y*BLOCKS_PER_MB_C*num_mb_hor+tid_y*num_mb_hor*num_mb_ver*BLOCKS_PER_MB_C; Chroma_inter_HadamardTransformAndQuantize_kernel( Dc_coef_temp, Quant_Add, Quant_Shift, Quant_tables[0], Dc_coef, tid_blk ); dev_dc_coefs_uv[out_dc_index] = Dc_coef[tid_blk]; Chroma_inter_IHadamard2x2AndDQuant_kernel( Dc_coef, Dquant_Shift, DQuant_tables[0], Dc_coef_temp, tid_blk ); } __syncthreads(); out_dct_index = tid_x+tid_y*blockDim.x+blockIdx.x*2*MB_TOTAL_SIZE_C+blockIdx.y*num_mb_hor*MB_TOTAL_SIZE_C+tid_z*num_mb_hor*num_mb_ver*MB_TOTAL_SIZE_C; for(int i=0;i<4;i++) { dev_dct_coefs_uv[out_dct_index+i*32] = Diff[tid_x+tid_y*blockDim.x + tid_z*2*MB_TOTAL_SIZE_C + i*32]; } 
__syncthreads(); //逆变换和反量化 Chroma_inter_DQuantAndITransform_kernel(Diff, Dc_coef_temp, pred_in, DQuant_tables, Dquant_Shift, tid_x, tid_y, tid_z); out_rec_index = offset_output + tid_x+(tid_y&1)*8 + (tid_y>>1)*ref_width_c; for(int i = 0;i <4;i++) { dev_recon_uv[out_rec_index + i*2*ref_width_c] = pred_in[tid_x + tid_y*blockDim.x + tid_z*2*MB_TOTAL_SIZE_C + i*32]; } } __global__ void Chroma_PFrame_Intra_ResidualCoding_kernel( unsigned char *dev_input_uv, unsigned char *dev_recon_uv, S_BLK_MB_INFO *dev_blk_mb_info, short *dev_dct_coefs_uv, short *dev_dc_coefs_uv, short *dev_Quant_tables, short *dev_Dquant_tables, S_QP_DATA *dev_QpData, int enc_width_c, int enc_height_c, int ref_width_c, int ref_height_c, int num_mb_hor, int num_mb_ver, int slice_num ) { //dim3 threads(2,4,1); __shared__ uchar4 s_top_neighbor[8]; __shared__ uchar4 s_left_neighbor[8]; __shared__ unsigned char src_in[MB_TOTAL_SIZE_C*2]; __shared__ unsigned int Sad[8]; __shared__ short Quant_tab[16]; __shared__ short DQuant_tab[16]; short2 Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23; uchar4 Pred_Row[4],Rec_Row[4]; __shared__ short QcoefDc[8],TempDC[8]; int TopAvailable, LeftAvailable,pred_mode; S_BLK_MB_INFO BlkMBInfo; S_BLK_MB_INFO BlkMBInfo1; int Quant_Add; int Quant_Shift; int Dquant_Shift; int num_mbs = num_mb_hor*num_mb_ver; int rec_index,coef_index; int first_mb; int tid_x,tid_y; tid_x = threadIdx.x; tid_y = threadIdx.y; int tid = tid_x + tid_y * blockDim.x; Quant_tab[tid] = dev_Quant_tables[tid]; Quant_tab[tid+8] = dev_Quant_tables[tid+8]; DQuant_tab[tid] = dev_Dquant_tables[tid]; DQuant_tab[tid+8] = dev_Dquant_tables[tid+8]; Quant_Add = dev_QpData->QuantAdd; Quant_Shift = dev_QpData->QuantShift; Dquant_Shift = dev_QpData->DQuantShift; first_mb = blockIdx.x*num_mb_hor* num_mb_ver/slice_num; //将全局存储器中的数据加载到共享存储器 for(int j =0;j< num_mb_ver/slice_num;j++) { //input_index = tid_x*BLK_WIDTH +(tid_y&1)*enc_width_c*BLK_HEIGHT + j*enc_width_c*MB_WIDTH_C + (tid_y>>1)*enc_width_c*enc_height_c; rec_index = tid_x*BLK_WIDTH + (tid_y&1)*ref_width_c*BLK_HEIGHT + j*MB_HEIGHT_C*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num); coef_index = ((tid&3)<<4)+j*num_mb_hor*MB_TOTAL_SIZE_C + (tid_y>>1)*enc_width_c*enc_height_c + first_mb*MB_TOTAL_SIZE_C; s_left_neighbor[tid].x = 0; s_left_neighbor[tid].y = 0; s_left_neighbor[tid].z = 0; s_left_neighbor[tid].w = 0; for(int k =0;k< num_mb_hor;k++) { BlkMBInfo = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]; BlkMBInfo1 = dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB +8 + first_mb*BLOCKS_PER_MB]; if((BlkMBInfo.Type == INTRA_LARGE_BLOCKS_MB_TYPE) || (BlkMBInfo.Type == INTRA_SMALL_BLOCKS_MB_TYPE)) { //将一个宏块的原始数据以光栅的形式导入共享存储器 for (int i=0;i<16;i++) { src_in[tid+i*8]= dev_input_uv[tid+k*MB_WIDTH_C+j*enc_width_c*MB_WIDTH_C+(i>>3)*enc_width_c*enc_height_c+(i&7)*enc_width_c + blockIdx.x*MB_HEIGHT_C*enc_width_c*(num_mb_ver/slice_num)]; } if(j==0) { s_top_neighbor[tid].x = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_top_neighbor[tid].y = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 1]; s_top_neighbor[tid].z = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 2]; 
s_top_neighbor[tid].w = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 3]; } else { s_top_neighbor[tid].x = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_top_neighbor[tid].y = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 1]; s_top_neighbor[tid].z = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 2]; s_top_neighbor[tid].w = dev_recon_uv[tid_x*4 + k*MB_WIDTH_C + (j*MB_HEIGHT_C-1)*ref_width_c + (tid_y>>1)*ref_width_c*ref_height_c + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num) + 3]; } TopAvailable = (BlkMBInfo.Loc & LOC_MB_TOP_EDGE) ? 0 : 1; LeftAvailable = (BlkMBInfo.Loc & LOC_MB_LEFT_EDGE) ? 0 : 1; // 帧内预测 IntraChromaPrediction_cu( s_top_neighbor, s_left_neighbor, src_in+tid_x*4+tid_y*32, TopAvailable, LeftAvailable, Pred_Row, Sad, pred_mode, tid_x, tid_y ); BlkMBInfo.IntraChromaMode = pred_mode; BlkMBInfo1.IntraChromaMode = pred_mode; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB + first_mb*BLOCKS_PER_MB]=BlkMBInfo; dev_blk_mb_info[tid+k*BLOCKS_PER_MB+j*num_mb_hor*BLOCKS_PER_MB +8 + first_mb*BLOCKS_PER_MB]=BlkMBInfo1; IntraChromaTransforms_cu( src_in+tid_x*4+tid_y*32, Pred_Row, Rec_Row, Quant_Add, Quant_Shift, Dquant_Shift, Quant_tab, DQuant_tab, Qcoef_0_01, Qcoef_0_23, Qcoef_1_01, Qcoef_1_23, Qcoef_2_01, Qcoef_2_23, Qcoef_3_01, Qcoef_3_23, QcoefDc, TempDC, tid_x, tid_y ); for(int i=0;i<4;i++) { dev_recon_uv[rec_index+i*ref_width_c+0] = Rec_Row[i].x; dev_recon_uv[rec_index+i*ref_width_c+1] = Rec_Row[i].y; dev_recon_uv[rec_index+i*ref_width_c+2] = Rec_Row[i].z; dev_recon_uv[rec_index+i*ref_width_c+3] = Rec_Row[i].w; } dev_dct_coefs_uv[coef_index] = Qcoef_0_01.x; dev_dct_coefs_uv[coef_index+1] = Qcoef_0_01.y; dev_dct_coefs_uv[coef_index+2] = Qcoef_0_23.x; dev_dct_coefs_uv[coef_index+3] = Qcoef_0_23.y; dev_dct_coefs_uv[coef_index+4] = Qcoef_1_01.x; dev_dct_coefs_uv[coef_index+5] = Qcoef_1_01.y; dev_dct_coefs_uv[coef_index+6] = Qcoef_1_23.x; dev_dct_coefs_uv[coef_index+7] = Qcoef_1_23.y; dev_dct_coefs_uv[coef_index+8] = Qcoef_2_01.x; dev_dct_coefs_uv[coef_index+9] = Qcoef_2_01.y; dev_dct_coefs_uv[coef_index+10] = Qcoef_2_23.x; dev_dct_coefs_uv[coef_index+11] = Qcoef_2_23.y; dev_dct_coefs_uv[coef_index+12] = Qcoef_3_01.x; dev_dct_coefs_uv[coef_index+13] = Qcoef_3_01.y; dev_dct_coefs_uv[coef_index+14] = Qcoef_3_23.x; dev_dct_coefs_uv[coef_index+15] = Qcoef_3_23.y; dev_dc_coefs_uv[(tid&3) + k*BLOCKS_PER_MB_C + j*BLOCKS_PER_MB_C*num_mb_hor +(tid_y>>1)*BLOCKS_PER_MB_C*num_mbs + first_mb*BLOCKS_PER_MB_C] = QcoefDc[tid]; if(tid_x == 1) { s_left_neighbor [tid-1].x = s_left_neighbor [tid].x = Rec_Row[0].w; s_left_neighbor [tid-1].y = s_left_neighbor [tid].y = Rec_Row[1].w; s_left_neighbor [tid-1].z = s_left_neighbor [tid].z = Rec_Row[2].w; s_left_neighbor [tid-1].w = s_left_neighbor [tid].w = Rec_Row[3].w; } } else { s_left_neighbor [tid].x = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_left_neighbor [tid].y = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c 
+(tid_y>>1)*ref_width_c*ref_height_c + ref_width_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_left_neighbor [tid].z = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + 2*ref_width_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; s_left_neighbor [tid].w = dev_recon_uv[k*MB_WIDTH_C + j*MB_HEIGHT_C*ref_width_c + (tid_y&1)*4*ref_width_c +(tid_y>>1)*ref_width_c*ref_height_c + 3*ref_width_c + 7 + blockIdx.x*MB_HEIGHT_C*ref_width_c*(num_mb_ver/slice_num)]; } rec_index += MB_WIDTH_C; coef_index += MB_TOTAL_SIZE_C; } } }
620cfe222589616a0c0eef6ddf62d912102d6dd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WHYDRO2 #include <stdio.h> #include <stdlib.h> #include <string.h> #include "prototypes.h" #include <mpi.h> //#include <cudpp.h> #include "gpu_type.h" #define NITERMAX 10 #define ERRTOL 1e-10 #define DEFDENSG 10. extern "C" struct OCT *gatherstencil(struct OCT *octstart, struct HGRID *stencil, int stride, struct CPUINFO *cpu, int *nread); extern "C" struct OCT *scatterstencil(struct OCT *octstart, struct HGRID *stencil, int stride, struct CPUINFO *cpu, REAL dxcur, REAL dtnew); extern "C" void create_hydstencil_GPU(struct CPUINFO *cpu, int stride); extern "C" int advancehydroGPU(struct OCT **firstoct, int level, struct CPUINFO *cpu, struct HGRID *stencil, int stride, REAL dxcur, REAL dtnew); extern "C" void create_pinned_stencil(struct HGRID **stencil, int stride); extern "C" void destroy_hydstencil_GPU(struct CPUINFO *cpu, int stride); extern "C" void destroy_pinned_stencil(struct HGRID **stencil, int stride); // =================================================================== void create_hydstencil_GPU(struct CPUINFO *cpu, int stride){ hipMalloc((void **)&(cpu->hyd_stencil),sizeof(struct HGRID)*stride); } // =================================================================== void create_pinned_stencil(struct HGRID **stencil, int stride){ hipHostMalloc( (void**)stencil, sizeof(struct HGRID)*stride ); CUDA_CHECK_ERROR("GPU hydro alloc"); } // =================================================================== void destroy_hydstencil_GPU(struct CPUINFO *cpu, int stride){ hipFree(cpu->hyd_stencil); } // =================================================================== void destroy_pinned_stencil(struct HGRID **stencil, int stride){ hipHostFree(stencil); } __device__ void dinitUtype(struct Utype* u){ u->d=0; u->du=0; u->dv=0; u->dw=0; u->E=0; u->eint=0; #ifdef WRADHYD u->dX=0; #ifdef HELIUM u->dXHE=0; u->dXXHE=0; #endif // HELIUM #endif // WRADHYD } // ============================================================================================================== __device__ void dgetE(struct Wtype *W){ W->E=W->p/(GAMMA-1.)+0.5*W->d*(W->u*W->u+W->v*W->v+W->w*W->w); } // ======================================================= __device__ void getcellnei_gpu_hydro(int cindex, int *neip, int *cell) { switch(cindex){ case 0: neip[0]=0;cell[0]=1; neip[1]=6;cell[1]=1; neip[2]=2;cell[2]=2; neip[3]=6;cell[3]=2; neip[4]=4;cell[4]=4; neip[5]=6;cell[5]=4; break; case 1: neip[0]=6;cell[0]=0; neip[1]=1;cell[1]=0; neip[2]=2;cell[2]=3; neip[3]=6;cell[3]=3; neip[4]=4;cell[4]=5; neip[5]=6;cell[5]=5; break; case 2: neip[0]=0;cell[0]=3; neip[1]=6;cell[1]=3; neip[2]=6;cell[2]=0; neip[3]=3;cell[3]=0; neip[4]=4;cell[4]=6; neip[5]=6;cell[5]=6; break; case 3: neip[0]=6;cell[0]=2; neip[1]=1;cell[1]=2; neip[2]=6;cell[2]=1; neip[3]=3;cell[3]=1; neip[4]=4;cell[4]=7; neip[5]=6;cell[5]=7; break; case 4: neip[0]=0;cell[0]=5; neip[1]=6;cell[1]=5; neip[2]=2;cell[2]=6; neip[3]=6;cell[3]=6; neip[4]=6;cell[4]=0; neip[5]=5;cell[5]=0; break; case 5: neip[0]=6;cell[0]=4; neip[1]=1;cell[1]=4; neip[2]=2;cell[2]=7; neip[3]=6;cell[3]=7; neip[4]=6;cell[4]=1; neip[5]=5;cell[5]=1; break; case 6: neip[0]=0;cell[0]=7; neip[1]=6;cell[1]=7; neip[2]=6;cell[2]=4; neip[3]=3;cell[3]=4; neip[4]=6;cell[4]=2; neip[5]=5;cell[5]=2; break; case 7: neip[0]=6;cell[0]=6; neip[1]=1;cell[1]=6; neip[2]=6;cell[2]=5; neip[3]=3;cell[3]=5; neip[4]=6;cell[4]=3; neip[5]=5;cell[5]=3; break; } } // ==================== converts U -> W __device__ void dU2W(struct Utype *U, 
struct Wtype *W) { REAL dloc=(U->d==0.?DEFDENSG:U->d); W->d=U->d; W->u=U->du/dloc; W->v=U->dv/dloc; W->w=U->dw/dloc; #ifdef DUAL_E W->p=U->eint*(GAMMA-1.); #else W->p=(GAMMA-1.)*(U->E-((U->du)*(U->du)+(U->dv)*(U->dv)+(U->dw)*(U->dw))/(dloc)*0.5); #endif W->E=U->E; #ifdef WRADHYD W->dX=U->dX; #ifdef HELIUM W->dXHE=U->dXHE; W->dXXHE=U->dXXHE; #endif // HELIUM #endif // WRADHYD W->a=SQRT(GAMMA*W->p/dloc); } // ==================== converts W -> U __device__ void dW2U(struct Wtype *W, struct Utype *U) { U->d=W->d; U->du=W->d*W->u; U->dv=W->d*W->v; U->dw=W->d*W->w; U->eint=W->p/(GAMMA-1.); U->E=W->E; #ifdef WRADHYD U->dX=W->dX; #ifdef HELIUM U->dXHE=W->dXHE; U->dXXHE=W->dXXHE; #endif #endif } // --------------------------------------------------------------- __device__ void dgetflux_X(struct Utype *U, REAL *f) { f[0]=U->du; f[1]=0.5*(3.-GAMMA)*U->du*U->du/U->d+(GAMMA-1.)*U->E-0.5*(GAMMA-1.)*(U->dv*U->dv+U->dw*U->dw)/U->d; f[2]=U->du*U->dv/U->d; f[3]=U->du*U->dw/U->d; f[4]=GAMMA*U->du/U->d*U->E-0.5*(GAMMA-1.)*U->du/(U->d*U->d)*(U->du*U->du+U->dv*U->dv+U->dw*U->dw); #ifdef WRADHYD f[6]=U->du*U->dX/U->d; #ifdef HELIUM f[7]=U->du*U->dXHE/U->d; f[8]=U->du*U->dXXHE/U->d; #endif #endif } // --------------------------------------------------------------- __device__ void dgetflux_Y(struct Utype *U, REAL *f) { f[0]=U->dv; f[1]=U->dv*U->du/U->d; f[2]=0.5*(3.-GAMMA)*U->dv*U->dv/U->d+(GAMMA-1.)*U->E-0.5*(GAMMA-1.)*(U->du*U->du+U->dw*U->dw)/U->d; f[3]=U->dv*U->dw/U->d; f[4]=GAMMA*U->dv/U->d*U->E-0.5*(GAMMA-1.)*U->dv/(U->d*U->d)*(U->du*U->du+U->dv*U->dv+U->dw*U->dw); #ifdef WRADHYD f[6]=U->dv*U->dX/U->d; #ifdef HELIUM f[7]=U->dv*U->dXHE/U->d; f[8]=U->dv*U->dXXHE/U->d; #endif #endif } // --------------------------------------------------------------- __device__ void dgetflux_Z(struct Utype *U, REAL *f) { f[0]=U->dw; f[1]=U->dw*U->du/U->d; f[2]=U->dw*U->dv/U->d; f[3]=0.5*(3.-GAMMA)*U->dw*U->dw/U->d+(GAMMA-1.)*U->E-0.5*(GAMMA-1.)*(U->du*U->du+U->dv*U->dv)/U->d; f[4]=GAMMA*U->dw/U->d*U->E-0.5*(GAMMA-1.)*U->dw/(U->d*U->d)*(U->du*U->du+U->dv*U->dv+U->dw*U->dw); #ifdef WRADHYD f[6]=U->dw*U->dX/U->d; #ifdef HELIUM f[7]=U->dw*U->dXHE/U->d; f[8]=U->dw*U->dXXHE/U->d; #endif #endif } // ================== performs the difference between two Us __device__ void ddiffU(struct Utype *U2, struct Utype *U1, struct Utype *UR){ UR->d =U2->d - U1->d; UR->du=U2->du- U1->du; UR->dv=U2->dv- U1->dv; UR->dw=U2->dw- U1->dw; UR->E =U2->E - U1->E; UR->eint=U2->eint-U1->eint; } // ================== performs the difference between two Ws __device__ void ddiffW(struct Wtype *W2, struct Wtype *W1, struct Wtype *WR){ WR->d=W2->d- W1->d; WR->u=W2->u- W1->u; WR->v=W2->v- W1->v; WR->w=W2->w- W1->w; WR->p=W2->p- W1->p; #ifdef WRADHYD WR->dX=W2->dX- W1->dX; #ifdef HELIUM WR->dXHE=W2->dXHE- W1->dXHE; WR->dXXHE=W2->dXXHE- W1->dXXHE; #endif #endif } // ================= minmod __device__ void dminmod(struct Utype *Um, struct Utype *Up, struct Utype *Ur){ REAL beta=1.; // 1. for MINBEE 2. 
for SUPERBEE // FLUX LIMITER if(Up->d>0){ Ur->d=FMAX(FMAX(0.,FMIN(beta*Um->d,Up->d)),FMIN(Um->d,beta*Up->d)); } else{ Ur->d=FMIN(FMIN(0.,FMAX(beta*Um->d,Up->d)),FMAX(Um->d,beta*Up->d)); } if(Up->du>0){ Ur->du=FMAX(FMAX(0.,FMIN(beta*Um->du,Up->du)),FMIN(Um->du,beta*Up->du)); } else{ Ur->du=FMIN(FMIN(0.,FMAX(beta*Um->du,Up->du)),FMAX(Um->du,beta*Up->du)); } if(Up->dv>0){ Ur->dv=FMAX(FMAX(0.,FMIN(beta*Um->dv,Up->dv)),FMIN(Um->dv,beta*Up->dv)); } else{ Ur->dv=FMIN(FMIN(0.,FMAX(beta*Um->dv,Up->dv)),FMAX(Um->dv,beta*Up->dv)); } if(Up->dw>0){ Ur->dw=FMAX(FMAX(0.,FMIN(beta*Um->dw,Up->dw)),FMIN(Um->dw,beta*Up->dw)); } else{ Ur->dw=FMIN(FMIN(0.,FMAX(beta*Um->dw,Up->dw)),FMAX(Um->dw,beta*Up->dw)); } if(Up->E>0){ Ur->E=FMAX(FMAX(0.,FMIN(beta*Um->E,Up->E)),FMIN(Um->E,beta*Up->E)); } else{ Ur->E=FMIN(FMIN(0.,FMAX(beta*Um->E,Up->E)),FMAX(Um->E,beta*Up->E)); } } //=============================================== //=============================================== __device__ void dminmod_W(struct Wtype *Wm, struct Wtype *Wp, struct Wtype *Wr){ REAL beta=1.; // 1. for MINBEE 2. for SUPERBEE // FLUX LIMITER if(Wp->d>0){ Wr->d=FMAX(FMAX(0.,FMIN(beta*Wm->d,Wp->d)),FMIN(Wm->d,beta*Wp->d)); } else{ Wr->d=FMIN(FMIN(0.,FMAX(beta*Wm->d,Wp->d)),FMAX(Wm->d,beta*Wp->d)); } #ifdef WRADHYD if(Wp->dX>0){ Wr->dX=FMAX(FMAX(0.,FMIN(beta*Wm->dX,Wp->dX)),FMIN(Wm->dX,beta*Wp->dX)); } else{ Wr->dX=FMIN(FMIN(0.,FMAX(beta*Wm->dX,Wp->dX)),FMAX(Wm->dX,beta*Wp->dX)); } #ifdef HELIUM if(Wp->dXHE>0){ Wr->dXHE=FMAX(FMAX(0.,FMIN(beta*Wm->dXHE,Wp->dXHE)),FMIN(Wm->dXHE,beta*Wp->dXHE)); } else{ Wr->dXHE=FMIN(FMIN(0.,FMAX(beta*Wm->dXHE,Wp->dXHE)),FMAX(Wm->dXHE,beta*Wp->dXHE)); } if(Wp->dXXHE>0){ Wr->dXXHE=FMAX(FMAX(0.,FMIN(beta*Wm->dXXHE,Wp->dXXHE)),FMIN(Wm->dXXHE,beta*Wp->dXXHE)); } else{ Wr->dXXHE=FMIN(FMIN(0.,FMAX(beta*Wm->dXXHE,Wp->dXXHE)),FMAX(Wm->dXXHE,beta*Wp->dXXHE)); } #endif // HELIUM #endif // WRADHYD if(Wp->u>0){ Wr->u=FMAX(FMAX(0.,FMIN(beta*Wm->u,Wp->u)),FMIN(Wm->u,beta*Wp->u)); } else{ Wr->u=FMIN(FMIN(0.,FMAX(beta*Wm->u,Wp->u)),FMAX(Wm->u,beta*Wp->u)); } if(Wp->v>0){ Wr->v=FMAX(FMAX(0.,FMIN(beta*Wm->v,Wp->v)),FMIN(Wm->v,beta*Wp->v)); } else{ Wr->v=FMIN(FMIN(0.,FMAX(beta*Wm->v,Wp->v)),FMAX(Wm->v,beta*Wp->v)); } if(Wp->w>0){ Wr->w=FMAX(FMAX(0.,FMIN(beta*Wm->w,Wp->w)),FMIN(Wm->w,beta*Wp->w)); } else{ Wr->w=FMIN(FMIN(0.,FMAX(beta*Wm->w,Wp->w)),FMAX(Wm->w,beta*Wp->w)); } if(Wp->p>0){ Wr->p=FMAX(FMAX(0.,FMIN(beta*Wm->p,Wp->p)),FMIN(Wm->p,beta*Wp->p)); } else{ Wr->p=FMIN(FMIN(0.,FMAX(beta*Wm->p,Wp->p)),FMAX(Wm->p,beta*Wp->p)); } } // ============= interp minmod ==================================================== __device__ void dinterpminmod(struct Utype *U0, struct Utype *Up, struct Utype *Dx, struct Utype *Dy, struct Utype *Dz,REAL dx,REAL dy,REAL dz){ Up->d =U0->d + dx*Dx->d +dy*Dy->d +dz*Dz->d; Up->du=U0->du + dx*Dx->du +dy*Dy->du +dz*Dz->du; Up->dv=U0->dv + dx*Dx->dv +dy*Dy->dv +dz*Dz->dv; Up->dw=U0->dw + dx*Dx->dw +dy*Dy->dw +dz*Dz->dw; Up->E =U0->E + dx*Dx->E +dy*Dy->E +dz*Dz->E; Up->eint =U0->eint + dx*Dx->eint +dy*Dy->eint +dz*Dz->eint; } // ============= interp minmod ==================================================== __device__ void dinterpminmod_W(struct Wtype *W0, struct Wtype *Wp, struct Wtype *Dx, struct Wtype *Dy, struct Wtype *Dz,REAL dx,REAL dy,REAL dz){ Wp->d =W0->d +dx*Dx->d +dy*Dy->d +dz*Dz->d; Wp->u =W0->u +dx*Dx->u +dy*Dy->u +dz*Dz->u; Wp->v =W0->v +dx*Dx->v +dy*Dy->v +dz*Dz->v; Wp->w =W0->w +dx*Dx->w +dy*Dy->w +dz*Dz->w; Wp->p =W0->p +dx*Dx->p +dy*Dy->p +dz*Dz->p; #ifdef WRADHYD Wp->dX =W0->dX +dx*Dx->dX 
+dy*Dy->dX +dz*Dz->dX; #ifdef HELIUM Wp->dXHE =W0->dXHE +dx*Dx->dXHE +dy*Dy->dXHE +dz*Dz->dXHE; Wp->dXXHE =W0->dXXHE +dx*Dx->dXXHE +dy*Dy->dXXHE +dz*Dz->dXXHE; #endif #endif } __device__ void dmatrix_jacobian(struct Wtype *W0, REAL dt,REAL dx,struct Wtype *Dx,struct Wtype *Dy,struct Wtype *Dz, struct Wtype *Wt){ REAL M[25]; #ifdef HELIUM REAL W[8]={0.,0.,0.,0.,0.,0.,0.,0.}; #else REAL W[6]={0.,0.,0.,0.,0.,0.}; #endif // HELIUM REAL d[5]; int i,j; #ifdef WRADHYD REAL X; #endif // ===== building the A matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->u; // off_diagonal elements M[0+1*5]=W0->d; M[4+1*5]=W0->d*W0->a*W0->a; M[1+4*5]=1./W0->d; // ===== First Product d[0]=Dx->d; d[1]=Dx->u; d[2]=Dx->v; d[3]=Dx->w; d[4]=Dx->p; for(j=0;j<5;j++){ for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->u*Dx->dX+W0->dX*Dx->u; #ifdef HELIUM W[6]+=W0->u*Dx->dXHE+W0->dXHE*Dx->u; W[7]+=W0->u*Dx->dXXHE+W0->dXXHE*Dx->u; #endif // HELIUM #endif // WRADHYD // ===== building the B matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->v; // off_diagonal elements M[0+2*5]=W0->d; M[4+2*5]=W0->d*W0->a*W0->a; M[2+4*5]=1./W0->d; // ===== Second Product d[0]=Dy->d; d[1]=Dy->u; d[2]=Dy->v; d[3]=Dy->w; d[4]=Dy->p; for(j=0;j<5;j++){ for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->v*Dx->dX+W0->dX*Dx->v; #ifdef HELIUM W[6]+=W0->v*Dx->dXHE+W0->dXHE*Dx->v; W[7]+=W0->v*Dx->dXXHE+W0->dXXHE*Dx->v; #endif // HELIUM #endif // WRADHYD // ===== building the C matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->w; // off_diagonal elements M[0+3*5]=W0->d; M[4+3*5]=W0->d*W0->a*W0->a; M[3+4*5]=1./W0->d; d[0]=Dz->d; d[1]=Dz->u; d[2]=Dz->v; d[3]=Dz->w; d[4]=Dz->p; // ===== Third Product for(j=0;j<5;j++){ for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->w*Dx->dX+W0->w*Dx->dX; #ifdef HELIUM W[6]+=W0->w*Dx->dXHE+W0->dXHE*Dx->w; W[7]+=W0->w*Dx->dXXHE+W0->dXXHE*Dx->w; #endif // HELIUM #endif // WRADHYD // ==== Final correction for(i=0;i<6;i++){ W[i]*=(-dt/dx*0.5); } Wt->d=W[0]; Wt->u=W[1]; Wt->v=W[2]; Wt->w=W[3]; Wt->p=W[4]; #ifdef WRADHYD Wt->dX=W[5]; #ifdef HELIUM Wt->dXHE=W[6]; Wt->dXXHE=W[7]; #endif // HELIUM #endif // WRADHYD } //======================================================================================================================================== __device__ void olddmatrix_jacobian(struct Wtype *W0, REAL dt,REAL dx,struct Wtype *Dx,struct Wtype *Dy,struct Wtype *Dz, struct Wtype *Wt){ REAL M[25]; #ifdef HELIUM REAL W[8]={0.,0.,0.,0.,0.,0.,0.,0.}; #else REAL W[6]={0.,0.,0.,0.,0.,0.}; #endif // HELIUM REAL d[5]; int i,j; #ifdef WRADHYD REAL X; #endif // ===== building the A matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->u; // off_diagonal elements M[0+1*5]=W0->d; M[4+1*5]=W0->d*W0->a*W0->a; M[1+4*5]=1./W0->d; // ===== First Product d[0]=Dx->d; d[1]=Dx->u; d[2]=Dx->v; d[3]=Dx->w; d[4]=Dx->p; for(j=0;j<5;j++){ for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->u*Dx->dX+W0->dX*Dx->u; #ifdef HELIUM W[6]+=W0->u*Dx->dXHE+W0->dXHE*Dx->u; W[7]+=W0->u*Dx->dXXHE+W0->dXXHE*Dx->u; #endif // HELIUM #endif // WRADHYD // ===== building the B matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->v; // off_diagonal elements M[0+2*5]=W0->d; M[4+2*5]=W0->d*W0->a*W0->a; M[2+4*5]=1./W0->d; // ===== Second Product d[0]=Dy->d; d[1]=Dy->u; d[2]=Dy->v; d[3]=Dy->w; d[4]=Dy->p; for(j=0;j<5;j++){ 
for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->v*Dx->dX+W0->dX*Dx->v; #ifdef HELIUM W[6]+=W0->v*Dx->dXHE+W0->dXHE*Dx->v; W[7]+=W0->v*Dx->dXXHE+W0->dXXHE*Dx->v; #endif // HELIUM #endif // WRADHYD // ===== building the C matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->w; // off_diagonal elements M[0+3*5]=W0->d; M[4+3*5]=W0->d*W0->a*W0->a; M[3+4*5]=1./W0->d; d[0]=Dz->d; d[1]=Dz->u; d[2]=Dz->v; d[3]=Dz->w; d[4]=Dz->p; // ===== Third Product for(j=0;j<5;j++){ for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->w*Dx->dX+W0->w*Dx->dX; #ifdef HELIUM W[6]+=W0->w*Dx->dXHE+W0->dXHE*Dx->w; W[7]+=W0->w*Dx->dXXHE+W0->dXXHE*Dx->w; #endif // HELIUM #endif // WRADHYD // ==== Final correction for(i=0;i<6;i++){ W[i]*=(-dt/dx*0.5); } Wt->d=W[0]; Wt->u=W[1]; Wt->v=W[2]; Wt->w=W[3]; Wt->p=W[4]; #ifdef WRADHYD Wt->dX=W[5]; #ifdef HELIUM Wt->dXHE=W[6]; Wt->dXXHE=W[7]; #endif // HELIUM #endif // WRADHYD } __device__ void dMUSCL_BOUND2(struct HGRID *stencil, int ioct, int icell, struct Wtype *Wi,REAL dt,REAL dx){ struct OCT * oct; struct Wtype *W0; struct Wtype *Wp; struct Wtype *Wm; struct Wtype Dp,Dm; struct Wtype D[3]; struct Wtype Wt; int inei2; int vcell[6],vnei[6]; int dir; int idir; int shift; #ifdef WGRAV REAL f[3]; struct Utype S; struct Utype U; #endif // WGRAV getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors W0=&(stencil->oct[ioct].cell[icell].field); // Limited Slopes shift=1; for(dir=0;dir<3;dir++){ inei2=2*dir; if(vnei[inei2]==6){ Wm=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wm=&(stencil->oct[ioct-shift].cell[vcell[inei2]].field); } inei2=2*dir+1; if(vnei[inei2]==6){ Wp=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wp=&(stencil->oct[ioct+shift].cell[vcell[inei2]].field); } ddiffW(Wp,W0,&Dp); ddiffW(W0,Wm,&Dm); dminmod_W(&Dm,&Dp,D+dir); shift*=3; } // build jacobian matrix product dmatrix_jacobian(W0,dt,dx,&D[0],&D[1],&D[2],&Wt); // Here Wt contains the evolution of the state // READY TO EVOLVE EXTRAPOLATED VALUE REAL ix[]={-0.5,0.5,0.0,0.0,0.0,0.0}; REAL iy[]={0.0,0.0,-0.5,0.5,0.0,0.0}; REAL iz[]={0.0,0.0,0.0,0.0,-0.5,0.5}; #ifdef WGRAV #ifndef NOCOUPLE memcpy(f,stencil->oct[ioct].cell[icell].f,sizeof(REAL)*3); #ifdef CONSERVATIVE S.d =0.; S.du=-W0->d*f[0]*0.5*dt; S.dv=-W0->d*f[1]*0.5*dt; S.dw=-W0->d*f[2]*0.5*dt; S.E =-(W0->d*W0->u*f[0]+W0->d*W0->v*f[1]+W0->d*W0->w*f[2])*dt*0.5; #endif // CONSERVATIVE #endif // NOCOUPLE #endif // WGRAV for(idir=0;idir<6;idir++){ Wi[idir].d = W0->d+ix[idir]*D[0].d+iy[idir]*D[1].d+iz[idir]*D[2].d+Wt.d; Wi[idir].u = W0->u+ix[idir]*D[0].u+iy[idir]*D[1].u+iz[idir]*D[2].u+Wt.u; Wi[idir].v = W0->v+ix[idir]*D[0].v+iy[idir]*D[1].v+iz[idir]*D[2].v+Wt.v; Wi[idir].w = W0->w+ix[idir]*D[0].w+iy[idir]*D[1].w+iz[idir]*D[2].w+Wt.w; Wi[idir].p = FMAX(W0->p+ix[idir]*D[0].p+iy[idir]*D[1].p+iz[idir]*D[2].p+Wt.p,PMIN); #ifdef WRADHYD Wi[idir].dX = W0->dX+ix[idir]*D[0].dX+iy[idir]*D[1].dX+iz[idir]*D[2].dX+Wt.dX; #ifdef HELIUM Wi[idir].dXHE = W0->dXHE+ix[idir]*D[0].dXHE+iy[idir]*D[1].dXHE+iz[idir]*D[2].dXHE+Wt.dXHE; Wi[idir].dXXHE = W0->dXXHE+ix[idir]*D[0].dXXHE+iy[idir]*D[1].dXXHE+iz[idir]*D[2].dXXHE+Wt.dXXHE; #endif // HELIUM #endif // WRADHYD /* if(Wi[idir].d<0) { */ /* printf("neg d in extrapolation %e %e %e %e %e\n",Wi[idir].d,W0->d,D[0].d,D[1].d,D[2].d); */ /* abort(); */ /* } */ //if(Wi[idir].p==PMIN) printf("%e %e \n",W0->p,W0->p+ix[idir]*D[0].p+iy[idir]*D[1].p+iz[idir]*D[2].p+Wt.p); #ifdef WGRAV #ifndef NOCOUPLE #ifdef PRIMITIVE Wi[idir].u+=-f[0]*0.5*dt; 
Wi[idir].v+=-f[1]*0.5*dt; Wi[idir].w+=-f[2]*0.5*dt; #endif // PRIMITIVE #ifdef CONSERVATIVE W2U(&Wi[idir],&U); U.d +=S.d; U.du +=S.du; U.dv +=S.dv; U.dw +=S.dw; U.E +=S.E; U2W(&U,&Wi[idir]); #endif // CONSERVATIVE #endif // NOCOUPLE #endif // WGRAV dgetE(Wi+idir); Wi[idir].a=SQRT(GAMMA*Wi[idir].p/Wi[idir].d); #ifdef WRADHYD REAL X0=W0->dX/(W0->d*(1.-YHE)); Wi[idir].dX=Wi[idir].d*(1.-YHE)*X0; #ifdef HELIUM REAL XHE0=W0->dXHE/(W0->d*(YHE)); Wi[idir].dXHE=Wi[idir].d*(YHE)*XHE0; REAL XXHE0=W0->dXXHE/(W0->d*(YHE)); Wi[idir].dXXHE=Wi[idir].d*(YHE)*XXHE0; #endif // HELIUM #endif // WRADHYD } } // ============================================== __device__ void olddMUSCL_BOUND2(struct HGRID *stencil, int ioct, int icell, struct Wtype *Wi,REAL dt,REAL dx){ struct Wtype *W0; struct Wtype *Wp; struct Wtype *Wm; struct Wtype Dp,Dm; struct Wtype D[3]; struct Wtype Wt; int inei2; int vcell[6],vnei[6]; int dir; int idir; int shift; #ifdef WGRAV REAL f[3]; #ifdef CONSERVATIVE struct Utype S; struct Utype U; #endif #endif getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors W0=&(stencil->oct[ioct].cell[icell].field); // Limited Slopes shift=1; for(dir=0;dir<3;dir++){ inei2=2*dir; if(vnei[inei2]==6){ Wm=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wm=&(stencil->oct[ioct-shift].cell[vcell[inei2]].field); } inei2=2*dir+1; if(vnei[inei2]==6){ Wp=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wp=&(stencil->oct[ioct+shift].cell[vcell[inei2]].field); } ddiffW(Wp,W0,&Dp); ddiffW(W0,Wm,&Dm); dminmod_W(&Dm,&Dp,D+dir); shift*=3; } // build jacobian matrix product dmatrix_jacobian(W0,dt,dx,&D[0],&D[1],&D[2],&Wt); // Here Wt contains the evolution of the state // READY TO EVOLVE EXTRAPOLATED VALUE REAL ix[]={-0.5,0.5,0.0,0.0,0.0,0.0}; REAL iy[]={0.0,0.0,-0.5,0.5,0.0,0.0}; REAL iz[]={0.0,0.0,0.0,0.0,-0.5,0.5}; #ifdef WGRAV #ifndef NOCOUPLE memcpy(f,stencil->oct[ioct].cell[icell].f,sizeof(REAL)*3); #ifdef CONSERVATIVE S.d =0.; S.du=-W0->d*f[0]*0.5*dt; S.dv=-W0->d*f[1]*0.5*dt; S.dw=-W0->d*f[2]*0.5*dt; S.E =-(W0->d*W0->u*f[0]+W0->d*W0->v*f[1]+W0->d*W0->w*f[2])*dt*0.5; #endif #endif #endif for(idir=0;idir<6;idir++){ Wi[idir].d = W0->d+ix[idir]*D[0].d+iy[idir]*D[1].d+iz[idir]*D[2].d+Wt.d; Wi[idir].u = W0->u+ix[idir]*D[0].u+iy[idir]*D[1].u+iz[idir]*D[2].u+Wt.u; Wi[idir].v = W0->v+ix[idir]*D[0].v+iy[idir]*D[1].v+iz[idir]*D[2].v+Wt.v; Wi[idir].w = W0->w+ix[idir]*D[0].w+iy[idir]*D[1].w+iz[idir]*D[2].w+Wt.w; Wi[idir].p = FMAX(W0->p+ix[idir]*D[0].p+iy[idir]*D[1].p+iz[idir]*D[2].p+Wt.p,PMIN); #ifdef WRADHYD Wi[idir].dX = W0->dX+ix[idir]*D[0].dX+iy[idir]*D[1].dX+iz[idir]*D[2].dX+Wt.dX; #ifdef HELIUM Wi[idir].dXHE = W0->dXHE+ix[idir]*D[0].dXHE+iy[idir]*D[1].dXHE+iz[idir]*D[2].dXHE+Wt.dXHE; Wi[idir].dXXHE = W0->dXXHE+ix[idir]*D[0].dXXHE+iy[idir]*D[1].dXXHE+iz[idir]*D[2].dXXHE+Wt.dXXHE; #endif #endif /* if(Wi[idir].p<0) abort(); */ /* if(Wi[idir].d<0) abort(); */ #ifdef WGRAV #ifndef NOCOUPLE #ifdef PRIMITIVE Wi[idir].u+=-f[0]*0.5*dt; Wi[idir].v+=-f[1]*0.5*dt; Wi[idir].w+=-f[2]*0.5*dt; #endif #ifdef CONSERVATIVE dW2U(&Wi[idir],&U); U.d +=S.d; U.du +=S.du; U.dv +=S.dv; U.dw +=S.dw; U.E +=S.E; dU2W(&U,&Wi[idir]); #endif #endif #endif dgetE(Wi+idir); Wi[idir].a=SQRT(GAMMA*Wi[idir].p/Wi[idir].d); #ifdef WRADHYD REAL X0=W0->dX/(W0->d*(1.-YHE)); Wi[idir].dX=Wi[idir].d*(1.-YHE)*X0; #ifdef HELIUM REAL XHE0=W0->dXHE/(W0->d*(YHE)); Wi[idir].dXHE=Wi[idir].d*(YHE)*XHE0; REAL XXHE0=W0->dXXHE/(W0->d*(YHE)); Wi[idir].dXXHE=Wi[idir].d*(YHE)*XXHE0; #endif #endif } } 
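/* Illustrative sketch, not part of the build: the functions below (dfrootprime, dfroot,
   dfindPressure, dfindPressure_Hybrid) obtain the star-region pressure between two
   ideal-gas states with a Newton iteration on the usual exact-Riemann-solver pressure
   function. The host-side snippet in this comment shows the same iteration on a
   Sod-like left/right state; the states, the gamma value, the finite-difference
   derivative and the helper names f_K/star_pressure_example are assumptions made for
   this example only, not part of the solver.

   #include <math.h>

   // f_K(p): pressure function of one state (shock branch for p>pK, rarefaction otherwise)
   static double f_K(double p, double dK, double pK, double aK, double g){
     if(p > pK){
       double A = 2./((g+1.)*dK);
       double B = (g-1.)/(g+1.)*pK;
       return (p-pK)*sqrt(A/(B+p));
     }
     return 2.*aK/(g-1.)*(pow(p/pK,(g-1.)/(2.*g))-1.);
   }

   static double star_pressure_example(void){
     double g=1.4;
     double dL=1.0,   pL=1.0, uL=0.0, aL=sqrt(g*pL/dL);  // left state
     double dR=0.125, pR=0.1, uR=0.0, aR=sqrt(g*pR/dR);  // right state
     double p=0.5*(pL+pR), eps=1e-6;
     for(int it=0; it<20; it++){
       double f  = f_K(p,dL,pL,aL,g) + f_K(p,dR,pR,aR,g) + (uR-uL);
       double fp = ( f_K(p+eps,dL,pL,aL,g) + f_K(p+eps,dR,pR,aR,g)
                   - f_K(p-eps,dL,pL,aL,g) - f_K(p-eps,dR,pR,aR,g) )/(2.*eps);
       double pnew = p - f/fp;            // Newton step on f(p*)=0
       if(fabs(pnew-p) < 1e-12*p){ p=pnew; break; }
       p = pnew;
     }
     return p;  // converges to p* ~ 0.30313 for this Sod-like setup
   }
*/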
//======================================================================================== __device__ REAL dfrootprime(REAL p, struct Wtype1D *WL, struct Wtype1D *WR) { REAL fL,fR; REAL AL,AR,BL,BR; AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; fL=(p>WL->p?SQRT(AL/(BL+p))*(1.-(p-WL->p)/(2.*(BL+p))):POW(p/WL->p,-(GAMMA+1)/(2.*GAMMA))/(WL->d*WL->a)); fR=(p>WR->p?SQRT(AR/(BR+p))*(1.-(p-WR->p)/(2.*(BR+p))):POW(p/WR->p,-(GAMMA+1)/(2.*GAMMA))/(WR->d*WR->a)); return fL+fR; } // ------------------------------------ __device__ REAL dfroot(REAL p, struct Wtype1D *WL, struct Wtype1D *WR, REAL *u) { REAL fL,fR; REAL AL,AR,BL,BR; REAL Deltau; AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; fL=(p>WL->p?(p-WL->p)*SQRT(AL/(BL+p)):2.*WL->a/(GAMMA-1.)*(POW(p/WL->p,(GAMMA-1)/(2.*GAMMA))-1.)); fR=(p>WR->p?(p-WR->p)*SQRT(AR/(BR+p)):2.*WR->a/(GAMMA-1.)*(POW(p/WR->p,(GAMMA-1)/(2.*GAMMA))-1.)); Deltau=WR->u-WL->u; *u=0.5*(WL->u+WR->u)+0.5*(fR-fL); return fL+fR+Deltau; } //======================================================================================== //======================================================================================== __device__ REAL dfindPressure(struct Wtype1D *WL, struct Wtype1D *WR, int *niter, REAL *u) { REAL ptr,pts,ppv; REAL ptr0,pts0,ppv0; REAL p,porg,dp; int i; REAL err; REAL unsurz=(2.0*GAMMA)/(GAMMA-1.0); REAL AL,AR,BL,BR,GL,GR; REAL pmin,pmax; REAL u2; pmin=FMIN(WL->p,WR->p); pmax=FMAX(WL->p,WR->p); // EXACT SOLVER // hybrid guess for pressure AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; ppv0=0.5*(WL->p+WR->p)-0.125*(WR->u-WL->u)*(WR->d+WL->d)*(WR->a+WL->a); ptr0=POW((WL->a+WR->a-0.5*(GAMMA-1)*(WR->u-WL->u))/(WL->a/POW(WL->p,1./unsurz)+WR->a/POW(WR->p,1./unsurz)),unsurz); ppv=FMAX(ERRTOL,ppv0); ptr=FMAX(ERRTOL,ptr0); GL=SQRT(AL/(ppv+BL)); GR=SQRT(AR/(ppv+BR)); pts0=(GL*WL->p+GR*WR->p-(WR->u-WL->u))/(GL+GR); pts=FMAX(ERRTOL,pts0); if(((pmax/pmin)<2.0)&&((pmin<=ppv)&&(ppv<=pmax))){ p=ppv; } else{ if(ppv<pmin){ p=ptr; } else{ p=pts; } } //p=0.5*(WL->p+WR->p); //p=FMAX(p,ERRTOL); *niter=0; for(i=0;i<NITERMAX;i++) { dp=dfroot(p,WL,WR,&u2)/dfrootprime(p,WL,WR); if(FABS(dp)<ERRTOL) break; while((p-dp)<0){ dp=dp*0.5; } porg=p; p=p-dp; err=2.*FABS(p-porg)/(FABS(p+porg)); *niter=*niter+1; if(err<ERRTOL) break; if(dfroot(p,WL,WR,&u2)<ERRTOL) break; } dfroot(p,WL,WR,&u2); // last calculation to get u; *u=(REAL)u2; return p; } //======================================================================================== //======================================================================================== __device__ REAL dfindPressure_Hybrid(struct Wtype1D *WL, struct Wtype1D *WR, int *niter, REAL *ustar){ REAL ppvrs; REAL dbar,abar; REAL pmax,pmin,pstar; REAL AL,AR,BL,BR,GL,GR; dbar=0.5*(WL->d+WR->d); abar=0.5*(WL->a+WR->a); ppvrs=0.5*((WL->p+WR->p)+(WL->u-WR->u)*dbar*abar); pmax=FMAX(WL->p,WR->p); pmin=FMIN(WL->p,WR->p); pstar=ppvrs; //printf("dbar=%e abar=%e ppvrs=%e pmax=%e pmin=%e pstar=%e\n",dbar,abar,ppvrs,pmax,pmin,pstar); if(((pmax/pmin)<2.)&&((pmin<pstar)&&(pstar<pmax))){ // PVRS CASE pstar=ppvrs; *ustar=0.5*((WL->u+WR->u)+(WL->p-WR->p)/(dbar*abar)); } else{ if(pstar<pmin){ //TRRS CASE REAL z=(GAMMA-1.)/(2.*GAMMA); REAL iz=(2.*GAMMA)/(GAMMA-1.); pstar=POW((WL->a+WR->a-(GAMMA-1.)/2.*(WR->u-WL->u))/(WL->a/POW(WL->p,z)+WR->a/POW(WR->p,z)),iz); 
*ustar=WL->u-2.*WL->a/(GAMMA-1.)*(POW(pstar/WL->p,z)-1.); } else{ //TSRS CASE REAL p0; p0=FMAX(0.,ppvrs); AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; GL=SQRT(AL/(p0+BL)); GR=SQRT(AR/(p0+BR)); pstar=(GL*WL->p+GR*WR->p-(WR->u-WL->u))/(GL+GR); *ustar=0.5*((WL->u+WR->u)+(pstar-WR->p)*GR-(pstar-WL->p)*GL); } } return pstar; } //==================================================================== __device__ void dspeedestimateX_HLLC(struct Wtype *WL,struct Wtype *WR, REAL *SL, REAL *SR, REAL *pstar, REAL *ustar){ REAL qL,qR; struct Wtype1D WLloc; struct Wtype1D WRloc; int n; WLloc.d=WL->d; WLloc.u=WL->u; WLloc.p=WL->p; WLloc.a=SQRT(GAMMA*WLloc.p/WLloc.d); WRloc.d=WR->d; WRloc.u=WR->u; WRloc.p=WR->p; WRloc.a=SQRT(GAMMA*WRloc.p/WRloc.d); //printf("%e %e %e %e ||| %e %e %e %e\n",WLloc.d,WLloc.u,WLloc.p,WLloc.a,WLloc.d,WRloc.u,WRloc.p,WRloc.a); #if 1 (*pstar)= dfindPressure_Hybrid(&WLloc,&WRloc,&n,ustar); if((*pstar)<0) (*pstar)=dfindPressure(&WLloc,&WRloc,&n,ustar); //if((*pstar)<=0) printf("shhh pstar=%e %e %d\n",*pstar,*ustar,n); qL=(*pstar<=WL->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WL->p-1.))); qR=(*pstar<=WR->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WR->p-1.))); *SL=WLloc.u-WLloc.a*qL; *SR=WRloc.u+WRloc.a*qR; if((*SL)>(*SR)){ (*SL)=FMIN(WLloc.u-WLloc.a,WRloc.u-WRloc.a); (*SR)=FMAX(WLloc.u+WLloc.a,WRloc.u+WRloc.a); } #endif //if(isnan(*ustar)) printf("Hehey\n"); } //==================================================================== void __device__ dspeedestimateY_HLLC(struct Wtype *WL,struct Wtype *WR, REAL *SL, REAL *SR, REAL *pstar, REAL *ustar){ REAL qL,qR; struct Wtype1D WLloc; struct Wtype1D WRloc; int n; WLloc.d=WL->d; WLloc.u=WL->v; WLloc.p=WL->p; WLloc.a=SQRT(GAMMA*WLloc.p/WLloc.d); WRloc.d=WR->d; WRloc.u=WR->v; WRloc.p=WR->p; WRloc.a=SQRT(GAMMA*WRloc.p/WRloc.d); (*pstar)=dfindPressure_Hybrid(&WLloc,&WRloc,&n,ustar); if((*pstar)<0) (*pstar)=dfindPressure(&WLloc,&WRloc,&n,ustar); // if((*pstar)<0) abort(); qL=(*pstar<=WL->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WL->p-1.))); qR=(*pstar<=WR->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WR->p-1.))); *SL=WLloc.u-WLloc.a*qL; *SR=WRloc.u+WRloc.a*qR; if((*SL)>(*SR)){ (*SL)=FMIN(WLloc.u-WLloc.a,WRloc.u-WRloc.a); (*SR)=FMAX(WLloc.u+WLloc.a,WRloc.u+WRloc.a); //abort(); } // if((*SL)>(*SR)) abort(); //if(isnan(*ustar)) printf("Hehey y\n"); } //==================================================================== void __device__ dspeedestimateZ_HLLC(struct Wtype *WL,struct Wtype *WR, REAL *SL, REAL *SR, REAL *pstar, REAL *ustar){ REAL qL,qR; struct Wtype1D WLloc; struct Wtype1D WRloc; int n; WLloc.d=WL->d; WLloc.u=WL->w; WLloc.p=WL->p; WLloc.a=SQRT(GAMMA*WLloc.p/WLloc.d); WRloc.d=WR->d; WRloc.u=WR->w; WRloc.p=WR->p; WRloc.a=SQRT(GAMMA*WRloc.p/WRloc.d); (*pstar)=dfindPressure_Hybrid(&WLloc,&WRloc,&n,ustar); if((*pstar)<0) (*pstar)=dfindPressure(&WLloc,&WRloc,&n,ustar); //if((*pstar)<0) abort(); qL=(*pstar<=WL->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WL->p-1.))); qR=(*pstar<=WR->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WR->p-1.))); *SL=WLloc.u-WLloc.a*qL; *SR=WRloc.u+WRloc.a*qR; if((*SL)>(*SR)){ (*SL)=FMIN(WLloc.u-WLloc.a,WRloc.u-WRloc.a); (*SR)=FMAX(WLloc.u+WLloc.a,WRloc.u+WRloc.a); //abort(); } //if((*SL)>(*SR)) abort(); //if(isnan(*ustar)) printf("Hehey z\n"); } // ============================================================================================= __global__ void dhydroM_sweepZ(struct HGRID *stencil,int nread,REAL dx, REAL dt){ int 
inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; dinitUtype(&Us); REAL ebar; REAL ecen=0.; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors if(i<nread){ memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); #if 1 // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); divu=stencil[i].New.cell[icell].divu; Wold.d=curcell->d; Wold.u=curcell->u; Wold.v=curcell->v; Wold.w=curcell->w; Wold.p=curcell->p; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ inei=iface+4; memcpy(WC+iface,WC+inei,sizeof(struct Wtype)); // moving the data towards idx=0,1 //memcpy(WC+iface,&(stencil[i].oct[13].cell[inei].field),sizeof(struct Wtype)); // moving the data towards idx=0,1 //HACK dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface+4; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); //memcpy(WN+iface,&(stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].field),sizeof(struct Wtype)); //HACK dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // Z DIRECTION ========================================================================= // --------- solving the Riemann Problems BOTTOM // Switching to Split description /* // =========================================== */ #ifdef RIEMANN_HLLC dspeedestimateZ_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UN[0],FL); fact=WN[0].d*(SL-WN[0].w)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*WN[0].u -UN[0].du)*SL; FL[2]+=(fact*WN[0].v -UN[0].dv)*SL; FL[3]+=(fact*ustar -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*WN[0].u); Us.dv=(fact*WN[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif // HELIUM #endif // WRADHYD } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UC[0],FL); fact=WC[0].d*(SR-WC[0].w)/(SR-ustar); FL[0]+=(fact*1. 
-UC[0].d )*SR; FL[1]+=(fact*WC[0].u -UC[0].du)*SR; FL[2]+=(fact*WC[0].v -UC[0].dv)*SR; FL[3]+=(fact*ustar -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*WC[0].u); Us.dv=(fact*WC[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif // HELIUM #endif // WRADHYD } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FL[5]=(Us.dw/Us.d*ebar); divu+=-divuloc; #endif // RIEMANN_HLLC // =========================================== // --------- solving the Riemann Problems TOP // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateZ_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UC[1],FR); fact=WC[1].d*(SL-WC[1].w)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*WC[1].u -UC[1].du)*SL; FR[2]+=(fact*WC[1].v -UC[1].dv)*SL; FR[3]+=(fact*ustar -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*WC[1].u); Us.dv=(fact*WC[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif // HELIUM #endif // WRADHYD } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UN[1],FR); fact=WN[1].d*(SR-WN[1].w)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*WN[1].u -UN[1].du)*SR; FR[2]+=(fact*WN[1].v -UN[1].dv)*SR; FR[3]+=(fact*ustar -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*WN[1].u); Us.dv=(fact*WN[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif // HELIUM #endif // WRADHYD } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FR[5]=(Us.dw/Us.d*ebar); divu+= divuloc; #endif // RIEMANN_HLLC //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours #endif for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+4*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+5*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; // ready for the next cell } //ready for the next oct } } __global__ void olddhydroM_sweepZ(struct HGRID *stencil, int nread,REAL dx, REAL dt){ int inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; REAL ebar; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); #if 1 // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); #ifdef DUAL_E divu=stencil[i].New.cell[icell].divu; #endif Wold.d=curcell->d; Wold.u=curcell->u; Wold.v=curcell->v; Wold.w=curcell->w; Wold.p=curcell->p; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ inei=iface+4; memcpy(WC+iface,WC+inei,sizeof(struct Wtype)); // moving the data towards idx=0,1 //memcpy(WC+iface,&(stencil[i].oct[13].cell[inei].field),sizeof(struct Wtype)); // moving the data towards idx=0,1 //HACK dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface+4; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); //memcpy(WN+iface,&(stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].field),sizeof(struct Wtype)); //HACK dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // Z DIRECTION ========================================================================= // --------- solving the Riemann Problems BOTTOM // Switching to Split description /* // =========================================== */ #ifdef RIEMANN_HLLC 
dspeedestimateZ_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UN[0],FL); fact=WN[0].d*(SL-WN[0].w)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*WN[0].u -UN[0].du)*SL; FL[2]+=(fact*WN[0].v -UN[0].dv)*SL; FL[3]+=(fact*ustar -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*WN[0].u); Us.dv=(fact*WN[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UC[0],FL); fact=WC[0].d*(SR-WC[0].w)/(SR-ustar); FL[0]+=(fact*1. -UC[0].d )*SR; FL[1]+=(fact*WC[0].u -UC[0].du)*SR; FL[2]+=(fact*WC[0].v -UC[0].dv)*SR; FL[3]+=(fact*ustar -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*WC[0].u); Us.dv=(fact*WC[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FL[5]=(Us.dw/Us.d*ebar); divu+=-divuloc; #endif // =========================================== // --------- solving the Riemann Problems TOP // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateZ_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UC[1],FR); fact=WC[1].d*(SL-WC[1].w)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*WC[1].u -UC[1].du)*SL; FR[2]+=(fact*WC[1].v -UC[1].dv)*SL; FR[3]+=(fact*ustar -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*WC[1].u); Us.dv=(fact*WC[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UN[1],FR); fact=WN[1].d*(SR-WN[1].w)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*WN[1].u -UN[1].du)*SR; FR[2]+=(fact*WN[1].v -UN[1].dv)*SR; FR[3]+=(fact*ustar -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*WN[1].u); Us.dv=(fact*WN[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FR[5]=(Us.dw/Us.d*ebar); divu+= divuloc; #endif #endif //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+4*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+5*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; //ready for the next oct } } } //============================================================================ // ============================================================================================= __global__ void dhydroM_sweepY(struct HGRID *stencil,int nread,REAL dx, REAL dt){ int inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; REAL ebar; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); divu=stencil[i].New.cell[icell].divu; Wold.d=curcell->d; Wold.u=curcell->u; Wold.v=curcell->v; Wold.w=curcell->w; Wold.p=curcell->p; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); /* #ifdef WRADHYD */ /* Wold.X=curcell->X; */ /* #endif */ dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ inei=iface+2; memcpy(WC+iface,WC+inei,sizeof(struct Wtype)); // moving the data towards idx=0,1 //memcpy(WC+iface,&Wold,sizeof(struct Wtype)); // moving the data towards idx=0,1 dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface+2; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); //memcpy(WN+iface,&(stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].field),sizeof(struct Wtype)); dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // Y DIRECTION ========================================================================= // --------- solving the Riemann Problems FRONT // Switching to Split description /* // 
=========================================== */ #ifdef RIEMANN_HLLC dspeedestimateY_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Y(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Y(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Y(&UN[0],FL); fact=WN[0].d*(SL-WN[0].v)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*WN[0].u -UN[0].du)*SL; FL[2]+=(fact*ustar -UN[0].dv)*SL; FL[3]+=(fact*WN[0].w -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].v)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].v))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*WN[0].u); Us.dv=(fact*ustar); Us.dw=(fact*WN[0].w); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].v)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].v))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Y(&UC[0],FL); fact=WC[0].d*(SR-WC[0].v)/(SR-ustar); FL[0]+=(fact*1. -UC[0].d )*SR; FL[1]+=(fact*WC[0].u -UC[0].du)*SR; FL[2]+=(fact*ustar -UC[0].dv)*SR; FL[3]+=(fact*WC[0].w -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].v)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].v))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*WC[0].u); Us.dv=(fact*ustar); Us.dw=(fact*WC[0].w); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].v)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].v))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); FL[5]=(Us.dv/Us.d*ebar); divuloc=(GAMMA-1.)*(Us.dv/Us.d)*eold; divu+=-divuloc; #endif // =========================================== // --------- solving the Riemann Problems BACK // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateY_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Y(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Y(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Y(&UC[1],FR); fact=WC[1].d*(SL-WC[1].v)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*WC[1].u -UC[1].du)*SL; FR[2]+=(fact*ustar -UC[1].dv)*SL; FR[3]+=(fact*WC[1].w -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].v)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].v))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*WC[1].u); Us.dv=(fact*ustar); Us.dw=(fact*WC[1].w); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].v)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].v))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Y(&UN[1],FR); fact=WN[1].d*(SR-WN[1].v)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*WN[1].u -UN[1].du)*SR; FR[2]+=(fact*ustar -UN[1].dv)*SR; FR[3]+=(fact*WN[1].w -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].v)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].v))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*WN[1].u); Us.dv=(fact*ustar); Us.dw=(fact*WN[1].w); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].v)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].v))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dv/Us.d)*eold; FR[5]=(Us.dv/Us.d*ebar); divu+= divuloc; #endif //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+2*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+3*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; //ready for the next oct } } } //=================================================================================================== //=================================================================================================== __global__ void dhydroM_sweepX(struct HGRID *stencil, int nread,REAL dx, REAL dt){ //printf("IN\n"); int inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; REAL ebar; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); divu=stencil[i].New.cell[icell].divu; Wold.d=curcell->d; Wold.u=curcell->u;; Wold.v=curcell->v;; Wold.w=curcell->w;; Wold.p=curcell->p;; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); //printf("dt=%e dx=%e Old =%e %e %e %e %e\n",dt,dx,Wold.d,Wold.u,Wold.v,Wold.w,Wold.p); /* #ifdef WRADHYD */ /* Wold.X=curcell->X; */ /* #endif */ dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // X DIRECTION ========================================================================= // --------- solving the Riemann Problems LEFT // Switching to Split description /* // =========================================== */ #ifdef RIEMANN_HLLC //printf("Ho %e %e %e %e|| %e %e %e 
%e\n",WC[0].d,WC[0].u,WC[0].p,WC[0].a,WN[0].d,WN[0].u,WN[0].p,WN[0].a); dspeedestimateX_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); //printf("Ha\n"); if(SL>=0.){ dgetflux_X(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_X(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_X(&UN[0],FL); fact=WN[0].d*(SL-WN[0].u)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*ustar -UN[0].du)*SL; FL[2]+=(fact*WN[0].v -UN[0].dv)*SL; FL[3]+=(fact*WN[0].w -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].u)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].u))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WN[0].v); Us.dw=(fact*WN[0].w); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].u)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].u))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_X(&UC[0],FL); fact=WC[0].d*(SR-WC[0].u)/(SR-ustar); FL[0]+=(fact*1. -UC[0].d )*SR; FL[1]+=(fact*ustar -UC[0].du)*SR; FL[2]+=(fact*WC[0].v -UC[0].dv)*SR; FL[3]+=(fact*WC[0].w -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].u)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].u))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WC[0].v); Us.dw=(fact*WC[0].w); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].u)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].u))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.du/Us.d)*eold; FL[5]=(Us.du/Us.d*ebar); divu+=-divuloc; #endif // =========================================== // --------- solving the Riemann Problems RIGHT // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateX_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_X(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_X(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_X(&UC[1],FR); fact=WC[1].d*(SL-WC[1].u)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*ustar -UC[1].du)*SL; FR[2]+=(fact*WC[1].v -UC[1].dv)*SL; FR[3]+=(fact*WC[1].w -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].u)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].u))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WC[1].v); Us.dw=(fact*WC[1].w); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].u)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].u))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_X(&UN[1],FR); fact=WN[1].d*(SR-WN[1].u)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*ustar -UN[1].du)*SR; FR[2]+=(fact*WN[1].v -UN[1].dv)*SR; FR[3]+=(fact*WN[1].w -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].u)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].u))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WN[1].v); Us.dw=(fact*WN[1].w); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].u)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].u))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.du/Us.d)*eold; FR[5]=(Us.du/Us.d*ebar); divu+= divuloc; #endif //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+0*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+1*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; //ready for the next oct } } } // ============================================================================================================== // ============================================================================================================== __global__ void dupdatefield(struct HGRID *stencil, int nread, int stride, struct CPUINFO *cpu, REAL dxcur, REAL dtnew) { int i,icell; struct Utype U; REAL one; int flx; REAL dtsurdx=dtnew/dxcur; REAL F[NFLUX]; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells if(stencil[i].oct[13].cell[icell].split) continue; memcpy(F,stencil[i].New.cell[icell].flux,sizeof(REAL)*NFLUX);// New fluxes from the stencil // ==== updating // actually we compute and store the delta U only one=1.; memset(&U,0,sizeof(struct Utype)); // setting delta U for(flx=0;flx<6;flx++){ U.d +=F[0+flx*NVAR]*dtsurdx*one; U.du+=F[1+flx*NVAR]*dtsurdx*one; U.dv+=F[2+flx*NVAR]*dtsurdx*one; U.dw+=F[3+flx*NVAR]*dtsurdx*one; U.E +=F[4+flx*NVAR]*dtsurdx*one; U.eint+=F[5+flx*NVAR]*dtsurdx*one; #ifdef WRADHYD #ifndef NOADX U.dX+=F[6+flx*NVAR]*dtsurdx*one; #ifdef HELIUM U.dXHE+=F[7+flx*NVAR]*dtsurdx*one; U.dXXHE+=F[8+flx*NVAR]*dtsurdx*one; #endif #else U.dX+=0.; #ifdef HELIUM U.dXHE+=0.; U.dXXHE+=0.; #endif #endif #endif one*=-1.; } // scatter back the delta Uwithin the stencil memcpy(&(stencil[i].New.cell[icell].deltaU),&U,sizeof(struct Utype)); } } } // ======================================================= //======================================================================= //======================================================================= int advancehydroGPU(struct OCT **firstoct, int level, struct CPUINFO *cpu, struct HGRID *stencil, int stride, REAL dxcur, REAL dtnew){ struct OCT *nextoct; struct OCT *curoct; struct OCT *curoct0; int nreadtot,nread; /* REAL t[10]; */ /* REAL tg=0.,th=0.,tu=0.,ts=0.; */ int is; int offset; CUDA_CHECK_ERROR("Hydro start"); // --------------- setting the first oct of the level nextoct=firstoct[level-1]; nreadtot=0; int ng; int nt; hipStream_t stream[cpu->nstream]; // Not fully regular expression int vnread[cpu->nstream]; // creating the streams for(is=0;is<cpu->nstream;is++){ hipStreamCreate(&stream[is]); } // Calculations if((nextoct!=NULL)&&(cpu->noct[level-1]!=0)){ do { curoct0=nextoct; curoct=curoct0; //t[0]=MPI_Wtime(); #if 1 offset=0; // streaming 
==================== for(is=0;is<cpu->nstream;is++){ // ------------ gathering the stencil value values //printf("offser=%d\n",offset); curoct=nextoct; if(curoct!=NULL){ nextoct= gatherstencil(curoct,stencil+offset,stride/cpu->nstream,cpu, vnread+is); if(vnread[is]!=0){ ng=((vnread[is]-1)/cpu->nthread)+1; // +1 is for leftovers if(ng==1){ nt=vnread[is]; } else{ nt=cpu->nthread; } dim3 gridoct(ng); dim3 blockoct(nt); #ifndef NOCOMP hipMemcpyAsync(cpu->hyd_stencil+offset,stencil+offset,vnread[is]*sizeof(struct HGRID),hipMemcpyHostToDevice,stream[is]); //printf("Sweep hydro dt=%e dx=%e\n",dtnew,dxcur); //CUDA_CHECK_ERROR("Sweep hydro"); // ------------ solving the hydro hipLaunchKernelGGL(( dhydroM_sweepX), dim3(gridoct),dim3(blockoct),0,stream[is], cpu->hyd_stencil+offset,vnread[is],dxcur,dtnew); hipLaunchKernelGGL(( dhydroM_sweepY), dim3(gridoct),dim3(blockoct),0,stream[is], cpu->hyd_stencil+offset,vnread[is],dxcur,dtnew); hipLaunchKernelGGL(( dhydroM_sweepZ), dim3(gridoct),dim3(blockoct),0,stream[is], cpu->hyd_stencil+offset,vnread[is],dxcur,dtnew); //printf("Sweep hydro stop\n"); // ------------ updating values within the stencil hipLaunchKernelGGL(( dupdatefield), dim3(gridoct),dim3(blockoct),0,stream[is], cpu->hyd_stencil+offset,vnread[is],stride,cpu,dxcur,dtnew); hipMemcpyAsync(stencil+offset,cpu->hyd_stencil+offset,vnread[is]*sizeof(struct HGRID),hipMemcpyDeviceToHost,stream[is]); #endif offset+=vnread[is]; } } } #endif /* dev_updatefield<<<gridoct2,blockoct2>>>(cpu->hyd_stencil,nread,stride,cpu,dxcur,dtnew); */ hipDeviceSynchronize(); // ------------ scatter back the FLUXES //hipMemcpy(stencil,cpu->hyd_stencil,nread*sizeof(struct HGRID),hipMemcpyDeviceToHost); nread=offset; nextoct=scatterstencil(curoct0,stencil, nread, cpu,dxcur,dtnew); //t[8]=MPI_Wtime(); nreadtot+=nread; /* ts+=(t[8]-t[6]); */ /* tu+=(t[6]-t[4]); */ /* th+=(t[4]-t[2]); */ /* tg+=(t[2]-t[0]); */ //printf("Start Error Hyd =%s nreadtot=%d\n",hipGetErrorString(hipGetLastError()),nreadtot); }while(nextoct!=NULL); } //printf("GPU | tgat=%e tcal=%e tup=%e tscat=%e\n",tg,th,tu,ts); // Destroying the streams for(is=0;is<cpu->nstream;is++){ hipStreamDestroy(stream[is]); } // printf("Start Error Hyd =%s nreadtot=%d\n",hipGetErrorString(hipGetLastError()),nreadtot); CUDA_CHECK_ERROR("Hydro Stop"); return nreadtot; } #endif
620cfe222589616a0c0eef6ddf62d912102d6dd9.cu
#ifdef WHYDRO2 #include <stdio.h> #include <stdlib.h> #include <string.h> #include "prototypes.h" #include <mpi.h> //#include <cudpp.h> #include "gpu_type.h" #define NITERMAX 10 #define ERRTOL 1e-10 #define DEFDENSG 10. extern "C" struct OCT *gatherstencil(struct OCT *octstart, struct HGRID *stencil, int stride, struct CPUINFO *cpu, int *nread); extern "C" struct OCT *scatterstencil(struct OCT *octstart, struct HGRID *stencil, int stride, struct CPUINFO *cpu, REAL dxcur, REAL dtnew); extern "C" void create_hydstencil_GPU(struct CPUINFO *cpu, int stride); extern "C" int advancehydroGPU(struct OCT **firstoct, int level, struct CPUINFO *cpu, struct HGRID *stencil, int stride, REAL dxcur, REAL dtnew); extern "C" void create_pinned_stencil(struct HGRID **stencil, int stride); extern "C" void destroy_hydstencil_GPU(struct CPUINFO *cpu, int stride); extern "C" void destroy_pinned_stencil(struct HGRID **stencil, int stride); // =================================================================== void create_hydstencil_GPU(struct CPUINFO *cpu, int stride){ cudaMalloc((void **)&(cpu->hyd_stencil),sizeof(struct HGRID)*stride); } // =================================================================== void create_pinned_stencil(struct HGRID **stencil, int stride){ cudaMallocHost( (void**)stencil, sizeof(struct HGRID)*stride ); CUDA_CHECK_ERROR("GPU hydro alloc"); } // =================================================================== void destroy_hydstencil_GPU(struct CPUINFO *cpu, int stride){ cudaFree(cpu->hyd_stencil); } // =================================================================== void destroy_pinned_stencil(struct HGRID **stencil, int stride){ cudaFreeHost(stencil); } __device__ void dinitUtype(struct Utype* u){ u->d=0; u->du=0; u->dv=0; u->dw=0; u->E=0; u->eint=0; #ifdef WRADHYD u->dX=0; #ifdef HELIUM u->dXHE=0; u->dXXHE=0; #endif // HELIUM #endif // WRADHYD } // ============================================================================================================== __device__ void dgetE(struct Wtype *W){ W->E=W->p/(GAMMA-1.)+0.5*W->d*(W->u*W->u+W->v*W->v+W->w*W->w); } // ======================================================= __device__ void getcellnei_gpu_hydro(int cindex, int *neip, int *cell) { switch(cindex){ case 0: neip[0]=0;cell[0]=1; neip[1]=6;cell[1]=1; neip[2]=2;cell[2]=2; neip[3]=6;cell[3]=2; neip[4]=4;cell[4]=4; neip[5]=6;cell[5]=4; break; case 1: neip[0]=6;cell[0]=0; neip[1]=1;cell[1]=0; neip[2]=2;cell[2]=3; neip[3]=6;cell[3]=3; neip[4]=4;cell[4]=5; neip[5]=6;cell[5]=5; break; case 2: neip[0]=0;cell[0]=3; neip[1]=6;cell[1]=3; neip[2]=6;cell[2]=0; neip[3]=3;cell[3]=0; neip[4]=4;cell[4]=6; neip[5]=6;cell[5]=6; break; case 3: neip[0]=6;cell[0]=2; neip[1]=1;cell[1]=2; neip[2]=6;cell[2]=1; neip[3]=3;cell[3]=1; neip[4]=4;cell[4]=7; neip[5]=6;cell[5]=7; break; case 4: neip[0]=0;cell[0]=5; neip[1]=6;cell[1]=5; neip[2]=2;cell[2]=6; neip[3]=6;cell[3]=6; neip[4]=6;cell[4]=0; neip[5]=5;cell[5]=0; break; case 5: neip[0]=6;cell[0]=4; neip[1]=1;cell[1]=4; neip[2]=2;cell[2]=7; neip[3]=6;cell[3]=7; neip[4]=6;cell[4]=1; neip[5]=5;cell[5]=1; break; case 6: neip[0]=0;cell[0]=7; neip[1]=6;cell[1]=7; neip[2]=6;cell[2]=4; neip[3]=3;cell[3]=4; neip[4]=6;cell[4]=2; neip[5]=5;cell[5]=2; break; case 7: neip[0]=6;cell[0]=6; neip[1]=1;cell[1]=6; neip[2]=6;cell[2]=5; neip[3]=3;cell[3]=5; neip[4]=6;cell[4]=3; neip[5]=5;cell[5]=3; break; } } // ==================== converts U -> W __device__ void dU2W(struct Utype *U, struct Wtype *W) { REAL dloc=(U->d==0.?DEFDENSG:U->d); W->d=U->d; W->u=U->du/dloc; 
W->v=U->dv/dloc; W->w=U->dw/dloc; #ifdef DUAL_E W->p=U->eint*(GAMMA-1.); #else W->p=(GAMMA-1.)*(U->E-((U->du)*(U->du)+(U->dv)*(U->dv)+(U->dw)*(U->dw))/(dloc)*0.5); #endif W->E=U->E; #ifdef WRADHYD W->dX=U->dX; #ifdef HELIUM W->dXHE=U->dXHE; W->dXXHE=U->dXXHE; #endif // HELIUM #endif // WRADHYD W->a=SQRT(GAMMA*W->p/dloc); } // ==================== converts W -> U __device__ void dW2U(struct Wtype *W, struct Utype *U) { U->d=W->d; U->du=W->d*W->u; U->dv=W->d*W->v; U->dw=W->d*W->w; U->eint=W->p/(GAMMA-1.); U->E=W->E; #ifdef WRADHYD U->dX=W->dX; #ifdef HELIUM U->dXHE=W->dXHE; U->dXXHE=W->dXXHE; #endif #endif } // --------------------------------------------------------------- __device__ void dgetflux_X(struct Utype *U, REAL *f) { f[0]=U->du; f[1]=0.5*(3.-GAMMA)*U->du*U->du/U->d+(GAMMA-1.)*U->E-0.5*(GAMMA-1.)*(U->dv*U->dv+U->dw*U->dw)/U->d; f[2]=U->du*U->dv/U->d; f[3]=U->du*U->dw/U->d; f[4]=GAMMA*U->du/U->d*U->E-0.5*(GAMMA-1.)*U->du/(U->d*U->d)*(U->du*U->du+U->dv*U->dv+U->dw*U->dw); #ifdef WRADHYD f[6]=U->du*U->dX/U->d; #ifdef HELIUM f[7]=U->du*U->dXHE/U->d; f[8]=U->du*U->dXXHE/U->d; #endif #endif } // --------------------------------------------------------------- __device__ void dgetflux_Y(struct Utype *U, REAL *f) { f[0]=U->dv; f[1]=U->dv*U->du/U->d; f[2]=0.5*(3.-GAMMA)*U->dv*U->dv/U->d+(GAMMA-1.)*U->E-0.5*(GAMMA-1.)*(U->du*U->du+U->dw*U->dw)/U->d; f[3]=U->dv*U->dw/U->d; f[4]=GAMMA*U->dv/U->d*U->E-0.5*(GAMMA-1.)*U->dv/(U->d*U->d)*(U->du*U->du+U->dv*U->dv+U->dw*U->dw); #ifdef WRADHYD f[6]=U->dv*U->dX/U->d; #ifdef HELIUM f[7]=U->dv*U->dXHE/U->d; f[8]=U->dv*U->dXXHE/U->d; #endif #endif } // --------------------------------------------------------------- __device__ void dgetflux_Z(struct Utype *U, REAL *f) { f[0]=U->dw; f[1]=U->dw*U->du/U->d; f[2]=U->dw*U->dv/U->d; f[3]=0.5*(3.-GAMMA)*U->dw*U->dw/U->d+(GAMMA-1.)*U->E-0.5*(GAMMA-1.)*(U->du*U->du+U->dv*U->dv)/U->d; f[4]=GAMMA*U->dw/U->d*U->E-0.5*(GAMMA-1.)*U->dw/(U->d*U->d)*(U->du*U->du+U->dv*U->dv+U->dw*U->dw); #ifdef WRADHYD f[6]=U->dw*U->dX/U->d; #ifdef HELIUM f[7]=U->dw*U->dXHE/U->d; f[8]=U->dw*U->dXXHE/U->d; #endif #endif } // ================== performs the difference between two Us __device__ void ddiffU(struct Utype *U2, struct Utype *U1, struct Utype *UR){ UR->d =U2->d - U1->d; UR->du=U2->du- U1->du; UR->dv=U2->dv- U1->dv; UR->dw=U2->dw- U1->dw; UR->E =U2->E - U1->E; UR->eint=U2->eint-U1->eint; } // ================== performs the difference between two Ws __device__ void ddiffW(struct Wtype *W2, struct Wtype *W1, struct Wtype *WR){ WR->d=W2->d- W1->d; WR->u=W2->u- W1->u; WR->v=W2->v- W1->v; WR->w=W2->w- W1->w; WR->p=W2->p- W1->p; #ifdef WRADHYD WR->dX=W2->dX- W1->dX; #ifdef HELIUM WR->dXHE=W2->dXHE- W1->dXHE; WR->dXXHE=W2->dXXHE- W1->dXXHE; #endif #endif } // ================= minmod __device__ void dminmod(struct Utype *Um, struct Utype *Up, struct Utype *Ur){ REAL beta=1.; // 1. for MINBEE 2. 
for SUPERBEE // FLUX LIMITER if(Up->d>0){ Ur->d=FMAX(FMAX(0.,FMIN(beta*Um->d,Up->d)),FMIN(Um->d,beta*Up->d)); } else{ Ur->d=FMIN(FMIN(0.,FMAX(beta*Um->d,Up->d)),FMAX(Um->d,beta*Up->d)); } if(Up->du>0){ Ur->du=FMAX(FMAX(0.,FMIN(beta*Um->du,Up->du)),FMIN(Um->du,beta*Up->du)); } else{ Ur->du=FMIN(FMIN(0.,FMAX(beta*Um->du,Up->du)),FMAX(Um->du,beta*Up->du)); } if(Up->dv>0){ Ur->dv=FMAX(FMAX(0.,FMIN(beta*Um->dv,Up->dv)),FMIN(Um->dv,beta*Up->dv)); } else{ Ur->dv=FMIN(FMIN(0.,FMAX(beta*Um->dv,Up->dv)),FMAX(Um->dv,beta*Up->dv)); } if(Up->dw>0){ Ur->dw=FMAX(FMAX(0.,FMIN(beta*Um->dw,Up->dw)),FMIN(Um->dw,beta*Up->dw)); } else{ Ur->dw=FMIN(FMIN(0.,FMAX(beta*Um->dw,Up->dw)),FMAX(Um->dw,beta*Up->dw)); } if(Up->E>0){ Ur->E=FMAX(FMAX(0.,FMIN(beta*Um->E,Up->E)),FMIN(Um->E,beta*Up->E)); } else{ Ur->E=FMIN(FMIN(0.,FMAX(beta*Um->E,Up->E)),FMAX(Um->E,beta*Up->E)); } } //=============================================== //=============================================== __device__ void dminmod_W(struct Wtype *Wm, struct Wtype *Wp, struct Wtype *Wr){ REAL beta=1.; // 1. for MINBEE 2. for SUPERBEE // FLUX LIMITER if(Wp->d>0){ Wr->d=FMAX(FMAX(0.,FMIN(beta*Wm->d,Wp->d)),FMIN(Wm->d,beta*Wp->d)); } else{ Wr->d=FMIN(FMIN(0.,FMAX(beta*Wm->d,Wp->d)),FMAX(Wm->d,beta*Wp->d)); } #ifdef WRADHYD if(Wp->dX>0){ Wr->dX=FMAX(FMAX(0.,FMIN(beta*Wm->dX,Wp->dX)),FMIN(Wm->dX,beta*Wp->dX)); } else{ Wr->dX=FMIN(FMIN(0.,FMAX(beta*Wm->dX,Wp->dX)),FMAX(Wm->dX,beta*Wp->dX)); } #ifdef HELIUM if(Wp->dXHE>0){ Wr->dXHE=FMAX(FMAX(0.,FMIN(beta*Wm->dXHE,Wp->dXHE)),FMIN(Wm->dXHE,beta*Wp->dXHE)); } else{ Wr->dXHE=FMIN(FMIN(0.,FMAX(beta*Wm->dXHE,Wp->dXHE)),FMAX(Wm->dXHE,beta*Wp->dXHE)); } if(Wp->dXXHE>0){ Wr->dXXHE=FMAX(FMAX(0.,FMIN(beta*Wm->dXXHE,Wp->dXXHE)),FMIN(Wm->dXXHE,beta*Wp->dXXHE)); } else{ Wr->dXXHE=FMIN(FMIN(0.,FMAX(beta*Wm->dXXHE,Wp->dXXHE)),FMAX(Wm->dXXHE,beta*Wp->dXXHE)); } #endif // HELIUM #endif // WRADHYD if(Wp->u>0){ Wr->u=FMAX(FMAX(0.,FMIN(beta*Wm->u,Wp->u)),FMIN(Wm->u,beta*Wp->u)); } else{ Wr->u=FMIN(FMIN(0.,FMAX(beta*Wm->u,Wp->u)),FMAX(Wm->u,beta*Wp->u)); } if(Wp->v>0){ Wr->v=FMAX(FMAX(0.,FMIN(beta*Wm->v,Wp->v)),FMIN(Wm->v,beta*Wp->v)); } else{ Wr->v=FMIN(FMIN(0.,FMAX(beta*Wm->v,Wp->v)),FMAX(Wm->v,beta*Wp->v)); } if(Wp->w>0){ Wr->w=FMAX(FMAX(0.,FMIN(beta*Wm->w,Wp->w)),FMIN(Wm->w,beta*Wp->w)); } else{ Wr->w=FMIN(FMIN(0.,FMAX(beta*Wm->w,Wp->w)),FMAX(Wm->w,beta*Wp->w)); } if(Wp->p>0){ Wr->p=FMAX(FMAX(0.,FMIN(beta*Wm->p,Wp->p)),FMIN(Wm->p,beta*Wp->p)); } else{ Wr->p=FMIN(FMIN(0.,FMAX(beta*Wm->p,Wp->p)),FMAX(Wm->p,beta*Wp->p)); } } // ============= interp minmod ==================================================== __device__ void dinterpminmod(struct Utype *U0, struct Utype *Up, struct Utype *Dx, struct Utype *Dy, struct Utype *Dz,REAL dx,REAL dy,REAL dz){ Up->d =U0->d + dx*Dx->d +dy*Dy->d +dz*Dz->d; Up->du=U0->du + dx*Dx->du +dy*Dy->du +dz*Dz->du; Up->dv=U0->dv + dx*Dx->dv +dy*Dy->dv +dz*Dz->dv; Up->dw=U0->dw + dx*Dx->dw +dy*Dy->dw +dz*Dz->dw; Up->E =U0->E + dx*Dx->E +dy*Dy->E +dz*Dz->E; Up->eint =U0->eint + dx*Dx->eint +dy*Dy->eint +dz*Dz->eint; } // ============= interp minmod ==================================================== __device__ void dinterpminmod_W(struct Wtype *W0, struct Wtype *Wp, struct Wtype *Dx, struct Wtype *Dy, struct Wtype *Dz,REAL dx,REAL dy,REAL dz){ Wp->d =W0->d +dx*Dx->d +dy*Dy->d +dz*Dz->d; Wp->u =W0->u +dx*Dx->u +dy*Dy->u +dz*Dz->u; Wp->v =W0->v +dx*Dx->v +dy*Dy->v +dz*Dz->v; Wp->w =W0->w +dx*Dx->w +dy*Dy->w +dz*Dz->w; Wp->p =W0->p +dx*Dx->p +dy*Dy->p +dz*Dz->p; #ifdef WRADHYD Wp->dX =W0->dX +dx*Dx->dX 
+dy*Dy->dX +dz*Dz->dX;
#ifdef HELIUM
  Wp->dXHE  = W0->dXHE  + dx*Dx->dXHE  + dy*Dy->dXHE  + dz*Dz->dXHE;
  Wp->dXXHE = W0->dXXHE + dx*Dx->dXXHE + dy*Dy->dXXHE + dz*Dz->dXXHE;
#endif
#endif
}


__device__ void dmatrix_jacobian(struct Wtype *W0, REAL dt, REAL dx, struct Wtype *Dx, struct Wtype *Dy, struct Wtype *Dz, struct Wtype *Wt){

  REAL M[25];
#ifdef HELIUM
  REAL W[8]={0.,0.,0.,0.,0.,0.,0.,0.};
#else
  REAL W[6]={0.,0.,0.,0.,0.,0.};
#endif // HELIUM
  REAL d[5];
  int i,j;
#ifdef WRADHYD
  REAL X;
#endif

  // ===== building the A matrix
  memset(M,0,25*sizeof(REAL));

  // diagonal elements
  for(i=0;i<5;i++) M[i+i*5]=W0->u;

  // off_diagonal elements
  M[0+1*5]=W0->d;
  M[4+1*5]=W0->d*W0->a*W0->a;
  M[1+4*5]=1./W0->d;

  // ===== First Product
  d[0]=Dx->d;
  d[1]=Dx->u;
  d[2]=Dx->v;
  d[3]=Dx->w;
  d[4]=Dx->p;

  for(j=0;j<5;j++){
    for(i=0;i<5;i++){
      W[i]+=M[i+j*5]*d[j];
    }
  }

#ifdef WRADHYD
  W[5]+=W0->u*Dx->dX+W0->dX*Dx->u;
#ifdef HELIUM
  W[6]+=W0->u*Dx->dXHE+W0->dXHE*Dx->u;
  W[7]+=W0->u*Dx->dXXHE+W0->dXXHE*Dx->u;
#endif // HELIUM
#endif // WRADHYD

  // ===== building the B matrix
  memset(M,0,25*sizeof(REAL));

  // diagonal elements
  for(i=0;i<5;i++) M[i+i*5]=W0->v;

  // off_diagonal elements
  M[0+2*5]=W0->d;
  M[4+2*5]=W0->d*W0->a*W0->a;
  M[2+4*5]=1./W0->d;

  // ===== Second Product
  d[0]=Dy->d;
  d[1]=Dy->u;
  d[2]=Dy->v;
  d[3]=Dy->w;
  d[4]=Dy->p;

  for(j=0;j<5;j++){
    for(i=0;i<5;i++){
      W[i]+=M[i+j*5]*d[j];
    }
  }

#ifdef WRADHYD
  // passive scalar advection along y uses the y-slopes
  W[5]+=W0->v*Dy->dX+W0->dX*Dy->v;
#ifdef HELIUM
  W[6]+=W0->v*Dy->dXHE+W0->dXHE*Dy->v;
  W[7]+=W0->v*Dy->dXXHE+W0->dXXHE*Dy->v;
#endif // HELIUM
#endif // WRADHYD

  // ===== building the C matrix
  memset(M,0,25*sizeof(REAL));

  // diagonal elements
  for(i=0;i<5;i++) M[i+i*5]=W0->w;

  // off_diagonal elements
  M[0+3*5]=W0->d;
  M[4+3*5]=W0->d*W0->a*W0->a;
  M[3+4*5]=1./W0->d;

  d[0]=Dz->d;
  d[1]=Dz->u;
  d[2]=Dz->v;
  d[3]=Dz->w;
  d[4]=Dz->p;

  // ===== Third Product
  for(j=0;j<5;j++){
    for(i=0;i<5;i++){
      W[i]+=M[i+j*5]*d[j];
    }
  }

#ifdef WRADHYD
  // passive scalar advection along z uses the z-slopes
  W[5]+=W0->w*Dz->dX+W0->dX*Dz->w;
#ifdef HELIUM
  W[6]+=W0->w*Dz->dXHE+W0->dXHE*Dz->w;
  W[7]+=W0->w*Dz->dXXHE+W0->dXXHE*Dz->w;
#endif // HELIUM
#endif // WRADHYD

  // ==== Final correction
  for(i=0;i<6;i++){
    W[i]*=(-dt/dx*0.5);
  }

  Wt->d=W[0];
  Wt->u=W[1];
  Wt->v=W[2];
  Wt->w=W[3];
  Wt->p=W[4];
#ifdef WRADHYD
  Wt->dX=W[5];
#ifdef HELIUM
  Wt->dXHE=W[6];
  Wt->dXXHE=W[7];
#endif // HELIUM
#endif // WRADHYD

}

//========================================================================================================================================

__device__ void olddmatrix_jacobian(struct Wtype *W0, REAL dt, REAL dx, struct Wtype *Dx, struct Wtype *Dy, struct Wtype *Dz, struct Wtype *Wt){

  REAL M[25];
#ifdef HELIUM
  REAL W[8]={0.,0.,0.,0.,0.,0.,0.,0.};
#else
  REAL W[6]={0.,0.,0.,0.,0.,0.};
#endif // HELIUM
  REAL d[5];
  int i,j;
#ifdef WRADHYD
  REAL X;
#endif

  // ===== building the A matrix
  memset(M,0,25*sizeof(REAL));

  // diagonal elements
  for(i=0;i<5;i++) M[i+i*5]=W0->u;

  // off_diagonal elements
  M[0+1*5]=W0->d;
  M[4+1*5]=W0->d*W0->a*W0->a;
  M[1+4*5]=1./W0->d;

  // ===== First Product
  d[0]=Dx->d;
  d[1]=Dx->u;
  d[2]=Dx->v;
  d[3]=Dx->w;
  d[4]=Dx->p;

  for(j=0;j<5;j++){
    for(i=0;i<5;i++){
      W[i]+=M[i+j*5]*d[j];
    }
  }

#ifdef WRADHYD
  W[5]+=W0->u*Dx->dX+W0->dX*Dx->u;
#ifdef HELIUM
  W[6]+=W0->u*Dx->dXHE+W0->dXHE*Dx->u;
  W[7]+=W0->u*Dx->dXXHE+W0->dXXHE*Dx->u;
#endif // HELIUM
#endif // WRADHYD

  // ===== building the B matrix
  memset(M,0,25*sizeof(REAL));

  // diagonal elements
  for(i=0;i<5;i++) M[i+i*5]=W0->v;

  // off_diagonal elements
  M[0+2*5]=W0->d;
  M[4+2*5]=W0->d*W0->a*W0->a;
  M[2+4*5]=1./W0->d;

  // ===== Second Product
  d[0]=Dy->d;
  d[1]=Dy->u;
  d[2]=Dy->v;
  d[3]=Dy->w;
  d[4]=Dy->p;

  for(j=0;j<5;j++){
for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->v*Dx->dX+W0->dX*Dx->v; #ifdef HELIUM W[6]+=W0->v*Dx->dXHE+W0->dXHE*Dx->v; W[7]+=W0->v*Dx->dXXHE+W0->dXXHE*Dx->v; #endif // HELIUM #endif // WRADHYD // ===== building the C matrix memset(M,0,25*sizeof(REAL)); // diagonal elements for(i=0;i<5;i++) M[i+i*5]=W0->w; // off_diagonal elements M[0+3*5]=W0->d; M[4+3*5]=W0->d*W0->a*W0->a; M[3+4*5]=1./W0->d; d[0]=Dz->d; d[1]=Dz->u; d[2]=Dz->v; d[3]=Dz->w; d[4]=Dz->p; // ===== Third Product for(j=0;j<5;j++){ for(i=0;i<5;i++){ W[i]+=M[i+j*5]*d[j]; } } #ifdef WRADHYD W[5]+=W0->w*Dx->dX+W0->w*Dx->dX; #ifdef HELIUM W[6]+=W0->w*Dx->dXHE+W0->dXHE*Dx->w; W[7]+=W0->w*Dx->dXXHE+W0->dXXHE*Dx->w; #endif // HELIUM #endif // WRADHYD // ==== Final correction for(i=0;i<6;i++){ W[i]*=(-dt/dx*0.5); } Wt->d=W[0]; Wt->u=W[1]; Wt->v=W[2]; Wt->w=W[3]; Wt->p=W[4]; #ifdef WRADHYD Wt->dX=W[5]; #ifdef HELIUM Wt->dXHE=W[6]; Wt->dXXHE=W[7]; #endif // HELIUM #endif // WRADHYD } __device__ void dMUSCL_BOUND2(struct HGRID *stencil, int ioct, int icell, struct Wtype *Wi,REAL dt,REAL dx){ struct OCT * oct; struct Wtype *W0; struct Wtype *Wp; struct Wtype *Wm; struct Wtype Dp,Dm; struct Wtype D[3]; struct Wtype Wt; int inei2; int vcell[6],vnei[6]; int dir; int idir; int shift; #ifdef WGRAV REAL f[3]; struct Utype S; struct Utype U; #endif // WGRAV getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors W0=&(stencil->oct[ioct].cell[icell].field); // Limited Slopes shift=1; for(dir=0;dir<3;dir++){ inei2=2*dir; if(vnei[inei2]==6){ Wm=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wm=&(stencil->oct[ioct-shift].cell[vcell[inei2]].field); } inei2=2*dir+1; if(vnei[inei2]==6){ Wp=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wp=&(stencil->oct[ioct+shift].cell[vcell[inei2]].field); } ddiffW(Wp,W0,&Dp); ddiffW(W0,Wm,&Dm); dminmod_W(&Dm,&Dp,D+dir); shift*=3; } // build jacobian matrix product dmatrix_jacobian(W0,dt,dx,&D[0],&D[1],&D[2],&Wt); // Here Wt contains the evolution of the state // READY TO EVOLVE EXTRAPOLATED VALUE REAL ix[]={-0.5,0.5,0.0,0.0,0.0,0.0}; REAL iy[]={0.0,0.0,-0.5,0.5,0.0,0.0}; REAL iz[]={0.0,0.0,0.0,0.0,-0.5,0.5}; #ifdef WGRAV #ifndef NOCOUPLE memcpy(f,stencil->oct[ioct].cell[icell].f,sizeof(REAL)*3); #ifdef CONSERVATIVE S.d =0.; S.du=-W0->d*f[0]*0.5*dt; S.dv=-W0->d*f[1]*0.5*dt; S.dw=-W0->d*f[2]*0.5*dt; S.E =-(W0->d*W0->u*f[0]+W0->d*W0->v*f[1]+W0->d*W0->w*f[2])*dt*0.5; #endif // CONSERVATIVE #endif // NOCOUPLE #endif // WGRAV for(idir=0;idir<6;idir++){ Wi[idir].d = W0->d+ix[idir]*D[0].d+iy[idir]*D[1].d+iz[idir]*D[2].d+Wt.d; Wi[idir].u = W0->u+ix[idir]*D[0].u+iy[idir]*D[1].u+iz[idir]*D[2].u+Wt.u; Wi[idir].v = W0->v+ix[idir]*D[0].v+iy[idir]*D[1].v+iz[idir]*D[2].v+Wt.v; Wi[idir].w = W0->w+ix[idir]*D[0].w+iy[idir]*D[1].w+iz[idir]*D[2].w+Wt.w; Wi[idir].p = FMAX(W0->p+ix[idir]*D[0].p+iy[idir]*D[1].p+iz[idir]*D[2].p+Wt.p,PMIN); #ifdef WRADHYD Wi[idir].dX = W0->dX+ix[idir]*D[0].dX+iy[idir]*D[1].dX+iz[idir]*D[2].dX+Wt.dX; #ifdef HELIUM Wi[idir].dXHE = W0->dXHE+ix[idir]*D[0].dXHE+iy[idir]*D[1].dXHE+iz[idir]*D[2].dXHE+Wt.dXHE; Wi[idir].dXXHE = W0->dXXHE+ix[idir]*D[0].dXXHE+iy[idir]*D[1].dXXHE+iz[idir]*D[2].dXXHE+Wt.dXXHE; #endif // HELIUM #endif // WRADHYD /* if(Wi[idir].d<0) { */ /* printf("neg d in extrapolation %e %e %e %e %e\n",Wi[idir].d,W0->d,D[0].d,D[1].d,D[2].d); */ /* abort(); */ /* } */ //if(Wi[idir].p==PMIN) printf("%e %e \n",W0->p,W0->p+ix[idir]*D[0].p+iy[idir]*D[1].p+iz[idir]*D[2].p+Wt.p); #ifdef WGRAV #ifndef NOCOUPLE #ifdef PRIMITIVE Wi[idir].u+=-f[0]*0.5*dt; 
Wi[idir].v+=-f[1]*0.5*dt; Wi[idir].w+=-f[2]*0.5*dt; #endif // PRIMITIVE #ifdef CONSERVATIVE W2U(&Wi[idir],&U); U.d +=S.d; U.du +=S.du; U.dv +=S.dv; U.dw +=S.dw; U.E +=S.E; U2W(&U,&Wi[idir]); #endif // CONSERVATIVE #endif // NOCOUPLE #endif // WGRAV dgetE(Wi+idir); Wi[idir].a=SQRT(GAMMA*Wi[idir].p/Wi[idir].d); #ifdef WRADHYD REAL X0=W0->dX/(W0->d*(1.-YHE)); Wi[idir].dX=Wi[idir].d*(1.-YHE)*X0; #ifdef HELIUM REAL XHE0=W0->dXHE/(W0->d*(YHE)); Wi[idir].dXHE=Wi[idir].d*(YHE)*XHE0; REAL XXHE0=W0->dXXHE/(W0->d*(YHE)); Wi[idir].dXXHE=Wi[idir].d*(YHE)*XXHE0; #endif // HELIUM #endif // WRADHYD } } // ============================================== __device__ void olddMUSCL_BOUND2(struct HGRID *stencil, int ioct, int icell, struct Wtype *Wi,REAL dt,REAL dx){ struct Wtype *W0; struct Wtype *Wp; struct Wtype *Wm; struct Wtype Dp,Dm; struct Wtype D[3]; struct Wtype Wt; int inei2; int vcell[6],vnei[6]; int dir; int idir; int shift; #ifdef WGRAV REAL f[3]; #ifdef CONSERVATIVE struct Utype S; struct Utype U; #endif #endif getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors W0=&(stencil->oct[ioct].cell[icell].field); // Limited Slopes shift=1; for(dir=0;dir<3;dir++){ inei2=2*dir; if(vnei[inei2]==6){ Wm=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wm=&(stencil->oct[ioct-shift].cell[vcell[inei2]].field); } inei2=2*dir+1; if(vnei[inei2]==6){ Wp=&(stencil->oct[ioct].cell[vcell[inei2]].field); } else{ Wp=&(stencil->oct[ioct+shift].cell[vcell[inei2]].field); } ddiffW(Wp,W0,&Dp); ddiffW(W0,Wm,&Dm); dminmod_W(&Dm,&Dp,D+dir); shift*=3; } // build jacobian matrix product dmatrix_jacobian(W0,dt,dx,&D[0],&D[1],&D[2],&Wt); // Here Wt contains the evolution of the state // READY TO EVOLVE EXTRAPOLATED VALUE REAL ix[]={-0.5,0.5,0.0,0.0,0.0,0.0}; REAL iy[]={0.0,0.0,-0.5,0.5,0.0,0.0}; REAL iz[]={0.0,0.0,0.0,0.0,-0.5,0.5}; #ifdef WGRAV #ifndef NOCOUPLE memcpy(f,stencil->oct[ioct].cell[icell].f,sizeof(REAL)*3); #ifdef CONSERVATIVE S.d =0.; S.du=-W0->d*f[0]*0.5*dt; S.dv=-W0->d*f[1]*0.5*dt; S.dw=-W0->d*f[2]*0.5*dt; S.E =-(W0->d*W0->u*f[0]+W0->d*W0->v*f[1]+W0->d*W0->w*f[2])*dt*0.5; #endif #endif #endif for(idir=0;idir<6;idir++){ Wi[idir].d = W0->d+ix[idir]*D[0].d+iy[idir]*D[1].d+iz[idir]*D[2].d+Wt.d; Wi[idir].u = W0->u+ix[idir]*D[0].u+iy[idir]*D[1].u+iz[idir]*D[2].u+Wt.u; Wi[idir].v = W0->v+ix[idir]*D[0].v+iy[idir]*D[1].v+iz[idir]*D[2].v+Wt.v; Wi[idir].w = W0->w+ix[idir]*D[0].w+iy[idir]*D[1].w+iz[idir]*D[2].w+Wt.w; Wi[idir].p = FMAX(W0->p+ix[idir]*D[0].p+iy[idir]*D[1].p+iz[idir]*D[2].p+Wt.p,PMIN); #ifdef WRADHYD Wi[idir].dX = W0->dX+ix[idir]*D[0].dX+iy[idir]*D[1].dX+iz[idir]*D[2].dX+Wt.dX; #ifdef HELIUM Wi[idir].dXHE = W0->dXHE+ix[idir]*D[0].dXHE+iy[idir]*D[1].dXHE+iz[idir]*D[2].dXHE+Wt.dXHE; Wi[idir].dXXHE = W0->dXXHE+ix[idir]*D[0].dXXHE+iy[idir]*D[1].dXXHE+iz[idir]*D[2].dXXHE+Wt.dXXHE; #endif #endif /* if(Wi[idir].p<0) abort(); */ /* if(Wi[idir].d<0) abort(); */ #ifdef WGRAV #ifndef NOCOUPLE #ifdef PRIMITIVE Wi[idir].u+=-f[0]*0.5*dt; Wi[idir].v+=-f[1]*0.5*dt; Wi[idir].w+=-f[2]*0.5*dt; #endif #ifdef CONSERVATIVE dW2U(&Wi[idir],&U); U.d +=S.d; U.du +=S.du; U.dv +=S.dv; U.dw +=S.dw; U.E +=S.E; dU2W(&U,&Wi[idir]); #endif #endif #endif dgetE(Wi+idir); Wi[idir].a=SQRT(GAMMA*Wi[idir].p/Wi[idir].d); #ifdef WRADHYD REAL X0=W0->dX/(W0->d*(1.-YHE)); Wi[idir].dX=Wi[idir].d*(1.-YHE)*X0; #ifdef HELIUM REAL XHE0=W0->dXHE/(W0->d*(YHE)); Wi[idir].dXHE=Wi[idir].d*(YHE)*XHE0; REAL XXHE0=W0->dXXHE/(W0->d*(YHE)); Wi[idir].dXXHE=Wi[idir].d*(YHE)*XXHE0; #endif #endif } } 
//======================================================================================== __device__ REAL dfrootprime(REAL p, struct Wtype1D *WL, struct Wtype1D *WR) { REAL fL,fR; REAL AL,AR,BL,BR; AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; fL=(p>WL->p?SQRT(AL/(BL+p))*(1.-(p-WL->p)/(2.*(BL+p))):POW(p/WL->p,-(GAMMA+1)/(2.*GAMMA))/(WL->d*WL->a)); fR=(p>WR->p?SQRT(AR/(BR+p))*(1.-(p-WR->p)/(2.*(BR+p))):POW(p/WR->p,-(GAMMA+1)/(2.*GAMMA))/(WR->d*WR->a)); return fL+fR; } // ------------------------------------ __device__ REAL dfroot(REAL p, struct Wtype1D *WL, struct Wtype1D *WR, REAL *u) { REAL fL,fR; REAL AL,AR,BL,BR; REAL Deltau; AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; fL=(p>WL->p?(p-WL->p)*SQRT(AL/(BL+p)):2.*WL->a/(GAMMA-1.)*(POW(p/WL->p,(GAMMA-1)/(2.*GAMMA))-1.)); fR=(p>WR->p?(p-WR->p)*SQRT(AR/(BR+p)):2.*WR->a/(GAMMA-1.)*(POW(p/WR->p,(GAMMA-1)/(2.*GAMMA))-1.)); Deltau=WR->u-WL->u; *u=0.5*(WL->u+WR->u)+0.5*(fR-fL); return fL+fR+Deltau; } //======================================================================================== //======================================================================================== __device__ REAL dfindPressure(struct Wtype1D *WL, struct Wtype1D *WR, int *niter, REAL *u) { REAL ptr,pts,ppv; REAL ptr0,pts0,ppv0; REAL p,porg,dp; int i; REAL err; REAL unsurz=(2.0*GAMMA)/(GAMMA-1.0); REAL AL,AR,BL,BR,GL,GR; REAL pmin,pmax; REAL u2; pmin=FMIN(WL->p,WR->p); pmax=FMAX(WL->p,WR->p); // EXACT SOLVER // hybrid guess for pressure AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; ppv0=0.5*(WL->p+WR->p)-0.125*(WR->u-WL->u)*(WR->d+WL->d)*(WR->a+WL->a); ptr0=POW((WL->a+WR->a-0.5*(GAMMA-1)*(WR->u-WL->u))/(WL->a/POW(WL->p,1./unsurz)+WR->a/POW(WR->p,1./unsurz)),unsurz); ppv=FMAX(ERRTOL,ppv0); ptr=FMAX(ERRTOL,ptr0); GL=SQRT(AL/(ppv+BL)); GR=SQRT(AR/(ppv+BR)); pts0=(GL*WL->p+GR*WR->p-(WR->u-WL->u))/(GL+GR); pts=FMAX(ERRTOL,pts0); if(((pmax/pmin)<2.0)&&((pmin<=ppv)&&(ppv<=pmax))){ p=ppv; } else{ if(ppv<pmin){ p=ptr; } else{ p=pts; } } //p=0.5*(WL->p+WR->p); //p=FMAX(p,ERRTOL); *niter=0; for(i=0;i<NITERMAX;i++) { dp=dfroot(p,WL,WR,&u2)/dfrootprime(p,WL,WR); if(FABS(dp)<ERRTOL) break; while((p-dp)<0){ dp=dp*0.5; } porg=p; p=p-dp; err=2.*FABS(p-porg)/(FABS(p+porg)); *niter=*niter+1; if(err<ERRTOL) break; if(dfroot(p,WL,WR,&u2)<ERRTOL) break; } dfroot(p,WL,WR,&u2); // last calculation to get u; *u=(REAL)u2; return p; } //======================================================================================== //======================================================================================== __device__ REAL dfindPressure_Hybrid(struct Wtype1D *WL, struct Wtype1D *WR, int *niter, REAL *ustar){ REAL ppvrs; REAL dbar,abar; REAL pmax,pmin,pstar; REAL AL,AR,BL,BR,GL,GR; dbar=0.5*(WL->d+WR->d); abar=0.5*(WL->a+WR->a); ppvrs=0.5*((WL->p+WR->p)+(WL->u-WR->u)*dbar*abar); pmax=FMAX(WL->p,WR->p); pmin=FMIN(WL->p,WR->p); pstar=ppvrs; //printf("dbar=%e abar=%e ppvrs=%e pmax=%e pmin=%e pstar=%e\n",dbar,abar,ppvrs,pmax,pmin,pstar); if(((pmax/pmin)<2.)&&((pmin<pstar)&&(pstar<pmax))){ // PVRS CASE pstar=ppvrs; *ustar=0.5*((WL->u+WR->u)+(WL->p-WR->p)/(dbar*abar)); } else{ if(pstar<pmin){ //TRRS CASE REAL z=(GAMMA-1.)/(2.*GAMMA); REAL iz=(2.*GAMMA)/(GAMMA-1.); pstar=POW((WL->a+WR->a-(GAMMA-1.)/2.*(WR->u-WL->u))/(WL->a/POW(WL->p,z)+WR->a/POW(WR->p,z)),iz); 
*ustar=WL->u-2.*WL->a/(GAMMA-1.)*(POW(pstar/WL->p,z)-1.); } else{ //TSRS CASE REAL p0; p0=FMAX(0.,ppvrs); AL=2./((GAMMA+1.)*WL->d); AR=2./((GAMMA+1.)*WR->d); BL=(GAMMA-1.)/(GAMMA+1.)*WL->p; BR=(GAMMA-1.)/(GAMMA+1.)*WR->p; GL=SQRT(AL/(p0+BL)); GR=SQRT(AR/(p0+BR)); pstar=(GL*WL->p+GR*WR->p-(WR->u-WL->u))/(GL+GR); *ustar=0.5*((WL->u+WR->u)+(pstar-WR->p)*GR-(pstar-WL->p)*GL); } } return pstar; } //==================================================================== __device__ void dspeedestimateX_HLLC(struct Wtype *WL,struct Wtype *WR, REAL *SL, REAL *SR, REAL *pstar, REAL *ustar){ REAL qL,qR; struct Wtype1D WLloc; struct Wtype1D WRloc; int n; WLloc.d=WL->d; WLloc.u=WL->u; WLloc.p=WL->p; WLloc.a=SQRT(GAMMA*WLloc.p/WLloc.d); WRloc.d=WR->d; WRloc.u=WR->u; WRloc.p=WR->p; WRloc.a=SQRT(GAMMA*WRloc.p/WRloc.d); //printf("%e %e %e %e ||| %e %e %e %e\n",WLloc.d,WLloc.u,WLloc.p,WLloc.a,WLloc.d,WRloc.u,WRloc.p,WRloc.a); #if 1 (*pstar)= dfindPressure_Hybrid(&WLloc,&WRloc,&n,ustar); if((*pstar)<0) (*pstar)=dfindPressure(&WLloc,&WRloc,&n,ustar); //if((*pstar)<=0) printf("shhh pstar=%e %e %d\n",*pstar,*ustar,n); qL=(*pstar<=WL->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WL->p-1.))); qR=(*pstar<=WR->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WR->p-1.))); *SL=WLloc.u-WLloc.a*qL; *SR=WRloc.u+WRloc.a*qR; if((*SL)>(*SR)){ (*SL)=FMIN(WLloc.u-WLloc.a,WRloc.u-WRloc.a); (*SR)=FMAX(WLloc.u+WLloc.a,WRloc.u+WRloc.a); } #endif //if(isnan(*ustar)) printf("Hehey\n"); } //==================================================================== void __device__ dspeedestimateY_HLLC(struct Wtype *WL,struct Wtype *WR, REAL *SL, REAL *SR, REAL *pstar, REAL *ustar){ REAL qL,qR; struct Wtype1D WLloc; struct Wtype1D WRloc; int n; WLloc.d=WL->d; WLloc.u=WL->v; WLloc.p=WL->p; WLloc.a=SQRT(GAMMA*WLloc.p/WLloc.d); WRloc.d=WR->d; WRloc.u=WR->v; WRloc.p=WR->p; WRloc.a=SQRT(GAMMA*WRloc.p/WRloc.d); (*pstar)=dfindPressure_Hybrid(&WLloc,&WRloc,&n,ustar); if((*pstar)<0) (*pstar)=dfindPressure(&WLloc,&WRloc,&n,ustar); // if((*pstar)<0) abort(); qL=(*pstar<=WL->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WL->p-1.))); qR=(*pstar<=WR->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WR->p-1.))); *SL=WLloc.u-WLloc.a*qL; *SR=WRloc.u+WRloc.a*qR; if((*SL)>(*SR)){ (*SL)=FMIN(WLloc.u-WLloc.a,WRloc.u-WRloc.a); (*SR)=FMAX(WLloc.u+WLloc.a,WRloc.u+WRloc.a); //abort(); } // if((*SL)>(*SR)) abort(); //if(isnan(*ustar)) printf("Hehey y\n"); } //==================================================================== void __device__ dspeedestimateZ_HLLC(struct Wtype *WL,struct Wtype *WR, REAL *SL, REAL *SR, REAL *pstar, REAL *ustar){ REAL qL,qR; struct Wtype1D WLloc; struct Wtype1D WRloc; int n; WLloc.d=WL->d; WLloc.u=WL->w; WLloc.p=WL->p; WLloc.a=SQRT(GAMMA*WLloc.p/WLloc.d); WRloc.d=WR->d; WRloc.u=WR->w; WRloc.p=WR->p; WRloc.a=SQRT(GAMMA*WRloc.p/WRloc.d); (*pstar)=dfindPressure_Hybrid(&WLloc,&WRloc,&n,ustar); if((*pstar)<0) (*pstar)=dfindPressure(&WLloc,&WRloc,&n,ustar); //if((*pstar)<0) abort(); qL=(*pstar<=WL->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WL->p-1.))); qR=(*pstar<=WR->p?1.:SQRT(1.+(GAMMA+1.)/(2.*GAMMA)*((*pstar)/WR->p-1.))); *SL=WLloc.u-WLloc.a*qL; *SR=WRloc.u+WRloc.a*qR; if((*SL)>(*SR)){ (*SL)=FMIN(WLloc.u-WLloc.a,WRloc.u-WRloc.a); (*SR)=FMAX(WLloc.u+WLloc.a,WRloc.u+WRloc.a); //abort(); } //if((*SL)>(*SR)) abort(); //if(isnan(*ustar)) printf("Hehey z\n"); } // ============================================================================================= __global__ void dhydroM_sweepZ(struct HGRID *stencil,int nread,REAL dx, REAL dt){ int 
inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; dinitUtype(&Us); REAL ebar; REAL ecen=0.; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors if(i<nread){ memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); #if 1 // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); divu=stencil[i].New.cell[icell].divu; Wold.d=curcell->d; Wold.u=curcell->u; Wold.v=curcell->v; Wold.w=curcell->w; Wold.p=curcell->p; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ inei=iface+4; memcpy(WC+iface,WC+inei,sizeof(struct Wtype)); // moving the data towards idx=0,1 //memcpy(WC+iface,&(stencil[i].oct[13].cell[inei].field),sizeof(struct Wtype)); // moving the data towards idx=0,1 //HACK dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface+4; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); //memcpy(WN+iface,&(stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].field),sizeof(struct Wtype)); //HACK dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // Z DIRECTION ========================================================================= // --------- solving the Riemann Problems BOTTOM // Switching to Split description /* // =========================================== */ #ifdef RIEMANN_HLLC dspeedestimateZ_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UN[0],FL); fact=WN[0].d*(SL-WN[0].w)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*WN[0].u -UN[0].du)*SL; FL[2]+=(fact*WN[0].v -UN[0].dv)*SL; FL[3]+=(fact*ustar -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*WN[0].u); Us.dv=(fact*WN[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif // HELIUM #endif // WRADHYD } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UC[0],FL); fact=WC[0].d*(SR-WC[0].w)/(SR-ustar); FL[0]+=(fact*1. 
-UC[0].d )*SR; FL[1]+=(fact*WC[0].u -UC[0].du)*SR; FL[2]+=(fact*WC[0].v -UC[0].dv)*SR; FL[3]+=(fact*ustar -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*WC[0].u); Us.dv=(fact*WC[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif // HELIUM #endif // WRADHYD } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FL[5]=(Us.dw/Us.d*ebar); divu+=-divuloc; #endif // RIEMANN_HLLC // =========================================== // --------- solving the Riemann Problems TOP // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateZ_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UC[1],FR); fact=WC[1].d*(SL-WC[1].w)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*WC[1].u -UC[1].du)*SL; FR[2]+=(fact*WC[1].v -UC[1].dv)*SL; FR[3]+=(fact*ustar -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*WC[1].u); Us.dv=(fact*WC[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif // HELIUM #endif // WRADHYD } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UN[1],FR); fact=WN[1].d*(SR-WN[1].w)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*WN[1].u -UN[1].du)*SR; FR[2]+=(fact*WN[1].v -UN[1].dv)*SR; FR[3]+=(fact*ustar -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*WN[1].u); Us.dv=(fact*WN[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif // HELIUM #endif // WRADHYD } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FR[5]=(Us.dw/Us.d*ebar); divu+= divuloc; #endif // RIEMANN_HLLC //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours #endif for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+4*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+5*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; // ready for the next cell } //ready for the next oct } } __global__ void olddhydroM_sweepZ(struct HGRID *stencil, int nread,REAL dx, REAL dt){ int inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; REAL ebar; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); #if 1 // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); #ifdef DUAL_E divu=stencil[i].New.cell[icell].divu; #endif Wold.d=curcell->d; Wold.u=curcell->u; Wold.v=curcell->v; Wold.w=curcell->w; Wold.p=curcell->p; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ inei=iface+4; memcpy(WC+iface,WC+inei,sizeof(struct Wtype)); // moving the data towards idx=0,1 //memcpy(WC+iface,&(stencil[i].oct[13].cell[inei].field),sizeof(struct Wtype)); // moving the data towards idx=0,1 //HACK dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface+4; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); //memcpy(WN+iface,&(stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].field),sizeof(struct Wtype)); //HACK dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // Z DIRECTION ========================================================================= // --------- solving the Riemann Problems BOTTOM // Switching to Split description /* // =========================================== */ #ifdef RIEMANN_HLLC 
dspeedestimateZ_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UN[0],FL); fact=WN[0].d*(SL-WN[0].w)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*WN[0].u -UN[0].du)*SL; FL[2]+=(fact*WN[0].v -UN[0].dv)*SL; FL[3]+=(fact*ustar -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*WN[0].u); Us.dv=(fact*WN[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].w)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UC[0],FL); fact=WC[0].d*(SR-WC[0].w)/(SR-ustar); FL[0]+=(fact*1. -UC[0].d )*SR; FL[1]+=(fact*WC[0].u -UC[0].du)*SR; FL[2]+=(fact*WC[0].v -UC[0].dv)*SR; FL[3]+=(fact*ustar -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*WC[0].u); Us.dv=(fact*WC[0].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].w)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].w))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FL[5]=(Us.dw/Us.d*ebar); divu+=-divuloc; #endif // =========================================== // --------- solving the Riemann Problems TOP // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateZ_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Z(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Z(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Z(&UC[1],FR); fact=WC[1].d*(SL-WC[1].w)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*WC[1].u -UC[1].du)*SL; FR[2]+=(fact*WC[1].v -UC[1].dv)*SL; FR[3]+=(fact*ustar -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*WC[1].u); Us.dv=(fact*WC[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].w)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Z(&UN[1],FR); fact=WN[1].d*(SR-WN[1].w)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*WN[1].u -UN[1].du)*SR; FR[2]+=(fact*WN[1].v -UN[1].dv)*SR; FR[3]+=(fact*ustar -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*WN[1].u); Us.dv=(fact*WN[1].v); Us.dw=(fact*ustar); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].w)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].w))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dw/Us.d)*eold; FR[5]=(Us.dw/Us.d*ebar); divu+= divuloc; #endif #endif //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+4*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+5*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; //ready for the next oct } } } //============================================================================ // ============================================================================================= __global__ void dhydroM_sweepY(struct HGRID *stencil,int nread,REAL dx, REAL dt){ int inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; REAL ebar; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); divu=stencil[i].New.cell[icell].divu; Wold.d=curcell->d; Wold.u=curcell->u; Wold.v=curcell->v; Wold.w=curcell->w; Wold.p=curcell->p; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); /* #ifdef WRADHYD */ /* Wold.X=curcell->X; */ /* #endif */ dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ inei=iface+2; memcpy(WC+iface,WC+inei,sizeof(struct Wtype)); // moving the data towards idx=0,1 //memcpy(WC+iface,&Wold,sizeof(struct Wtype)); // moving the data towards idx=0,1 dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface+2; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); //memcpy(WN+iface,&(stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].field),sizeof(struct Wtype)); dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // Y DIRECTION ========================================================================= // --------- solving the Riemann Problems FRONT // Switching to Split description /* // 
=========================================== */ #ifdef RIEMANN_HLLC dspeedestimateY_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Y(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Y(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Y(&UN[0],FL); fact=WN[0].d*(SL-WN[0].v)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*WN[0].u -UN[0].du)*SL; FL[2]+=(fact*ustar -UN[0].dv)*SL; FL[3]+=(fact*WN[0].w -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].v)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].v))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*WN[0].u); Us.dv=(fact*ustar); Us.dw=(fact*WN[0].w); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].v)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].v))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Y(&UC[0],FL); fact=WC[0].d*(SR-WC[0].v)/(SR-ustar); FL[0]+=(fact*1. -UC[0].d )*SR; FL[1]+=(fact*WC[0].u -UC[0].du)*SR; FL[2]+=(fact*ustar -UC[0].dv)*SR; FL[3]+=(fact*WC[0].w -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].v)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].v))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*WC[0].u); Us.dv=(fact*ustar); Us.dw=(fact*WC[0].w); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].v)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].v))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); FL[5]=(Us.dv/Us.d*ebar); divuloc=(GAMMA-1.)*(Us.dv/Us.d)*eold; divu+=-divuloc; #endif // =========================================== // --------- solving the Riemann Problems BACK // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateY_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_Y(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_Y(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_Y(&UC[1],FR); fact=WC[1].d*(SL-WC[1].v)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*WC[1].u -UC[1].du)*SL; FR[2]+=(fact*ustar -UC[1].dv)*SL; FR[3]+=(fact*WC[1].w -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].v)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].v))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*WC[1].u); Us.dv=(fact*ustar); Us.dw=(fact*WC[1].w); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].v)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].v))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_Y(&UN[1],FR); fact=WN[1].d*(SR-WN[1].v)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*WN[1].u -UN[1].du)*SR; FR[2]+=(fact*ustar -UN[1].dv)*SR; FR[3]+=(fact*WN[1].w -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].v)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].v))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*WN[1].u); Us.dv=(fact*ustar); Us.dw=(fact*WN[1].w); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].v)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].v))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.dv/Us.d)*eold; FR[5]=(Us.dv/Us.d*ebar); divu+= divuloc; #endif //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+2*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+3*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; //ready for the next oct } } } //=================================================================================================== //=================================================================================================== __global__ void dhydroM_sweepX(struct HGRID *stencil, int nread,REAL dx, REAL dt){ //printf("IN\n"); int inei,icell,iface; int i; int vnei[6],vcell[6]; REAL FL[NVAR],FR[NVAR]; struct Utype Uold; struct Wtype Wold; REAL pstar,ustar; struct Wtype WT[6]; // FOR MUSCL RECONSTRUCTION struct Wtype WC[6]; // FOR MUSCL RECONSTRUCTION struct Utype UC[2]; struct Utype UN[2]; struct Wtype WN[2]; int ioct[7]={12,14,10,16,4,22,13}; int idxnei[6]={1,0,3,2,5,4}; struct Wtype *curcell; REAL SL,SR; int ffact[2]={0,0}; REAL fact; struct Utype Us; REAL ebar; REAL divu,divuloc; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells getcellnei_gpu_hydro(icell, vnei, vcell); // we get the neighbors memset(FL,0,sizeof(REAL)*NVAR); memset(FR,0,sizeof(REAL)*NVAR); // Getting the original state =========================== curcell=&(stencil[i].oct[ioct[6]].cell[icell].field); divu=stencil[i].New.cell[icell].divu; Wold.d=curcell->d; Wold.u=curcell->u;; Wold.v=curcell->v;; Wold.w=curcell->w;; Wold.p=curcell->p;; Wold.a=SQRT(GAMMA*Wold.p/Wold.d); //printf("dt=%e dx=%e Old =%e %e %e %e %e\n",dt,dx,Wold.d,Wold.u,Wold.v,Wold.w,Wold.p); /* #ifdef WRADHYD */ /* Wold.X=curcell->X; */ /* #endif */ dW2U(&Wold,&Uold); // primitive -> conservative REAL eold=Uold.eint; /* // MUSCL STATE RECONSTRUCTION */ memset(ffact,0,sizeof(int)*2); dMUSCL_BOUND2(stencil+i, 13, icell, WC,dt,dx);// central for(iface=0;iface<2;iface++){ dW2U(WC+iface,UC+iface); } // Neighbor MUSCL reconstruction for(iface=0;iface<2;iface++){ inei=iface; dMUSCL_BOUND2(stencil+i, ioct[vnei[inei]], vcell[inei], WT,dt,dx);// memcpy(WN+iface,WT+idxnei[inei],sizeof(struct Wtype)); dW2U(WN+iface,UN+iface); if(!stencil[i].oct[ioct[vnei[inei]]].cell[vcell[inei]].split){ ffact[iface]=1; // we cancel the contriubtion of split neighbors } } // X DIRECTION ========================================================================= // --------- solving the Riemann Problems LEFT // Switching to Split description /* // =========================================== */ #ifdef RIEMANN_HLLC //printf("Ho %e %e %e %e|| %e %e %e 
%e\n",WC[0].d,WC[0].u,WC[0].p,WC[0].a,WN[0].d,WN[0].u,WN[0].p,WN[0].a); dspeedestimateX_HLLC(&WN[0],&WC[0],&SL,&SR,&pstar,&ustar); //printf("Ha\n"); if(SL>=0.){ dgetflux_X(&UN[0],FL); memcpy(&Us,&UN[0],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_X(&UC[0],FL); memcpy(&Us,&UC[0],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_X(&UN[0],FL); fact=WN[0].d*(SL-WN[0].u)/(SL-ustar); FL[0]+=(fact*1. -UN[0].d )*SL; FL[1]+=(fact*ustar -UN[0].du)*SL; FL[2]+=(fact*WN[0].v -UN[0].dv)*SL; FL[3]+=(fact*WN[0].w -UN[0].dw)*SL; FL[4]+=(fact*(UN[0].E/UN[0].d+(ustar-WN[0].u)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].u))))-UN[0].E )*SL; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WN[0].v); Us.dw=(fact*WN[0].w); Us.E =(fact*(UN[0].E/UN[0].d+(ustar-WN[0].u)*(ustar+WN[0].p/(WN[0].d*(SL-WN[0].u))))); #ifdef WRADHYD FL[6]+=(fact*WN[0].dX/WN[0].d -UN[0].dX)*SL; #ifdef HELIUM FL[7]+=(fact*WN[0].dXHE/WN[0].d -UN[0].dXHE)*SL; FL[8]+=(fact*WN[0].dXXHE/WN[0].d -UN[0].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_X(&UC[0],FL); fact=WC[0].d*(SR-WC[0].u)/(SR-ustar); FL[0]+=(fact*1. -UC[0].d )*SR; FL[1]+=(fact*ustar -UC[0].du)*SR; FL[2]+=(fact*WC[0].v -UC[0].dv)*SR; FL[3]+=(fact*WC[0].w -UC[0].dw)*SR; FL[4]+=(fact*(UC[0].E/UC[0].d+(ustar-WC[0].u)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].u))))-UC[0].E )*SR; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WC[0].v); Us.dw=(fact*WC[0].w); Us.E =(fact*(UC[0].E/UC[0].d+(ustar-WC[0].u)*(ustar+WC[0].p/(WC[0].d*(SR-WC[0].u))))); #ifdef WRADHYD FL[6]+=(fact*WC[0].dX/WC[0].d -UC[0].dX)*SR; #ifdef HELIUM FL[7]+=(fact*WC[0].dXHE/WC[0].d -UC[0].dXHE)*SR; FL[8]+=(fact*WC[0].dXXHE/WC[0].d -UC[0].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.du/Us.d)*eold; FL[5]=(Us.du/Us.d*ebar); divu+=-divuloc; #endif // =========================================== // --------- solving the Riemann Problems RIGHT // Switching to Split description //===================================================== #ifdef RIEMANN_HLLC dspeedestimateX_HLLC(&WC[1],&WN[1],&SL,&SR,&pstar,&ustar); if(SL>=0.){ dgetflux_X(&UC[1],FR); memcpy(&Us,&UC[1],sizeof(struct Utype)); } else if(SR<=0.){ dgetflux_X(&UN[1],FR); memcpy(&Us,&UN[1],sizeof(struct Utype)); } else if((SL<0.)&&(ustar>=0.)){ dgetflux_X(&UC[1],FR); fact=WC[1].d*(SL-WC[1].u)/(SL-ustar); FR[0]+=(fact*1. -UC[1].d )*SL; FR[1]+=(fact*ustar -UC[1].du)*SL; FR[2]+=(fact*WC[1].v -UC[1].dv)*SL; FR[3]+=(fact*WC[1].w -UC[1].dw)*SL; FR[4]+=(fact*(UC[1].E/UC[1].d+(ustar-WC[1].u)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].u))))-UC[1].E )*SL; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WC[1].v); Us.dw=(fact*WC[1].w); Us.E =(fact*(UC[1].E/UC[1].d+(ustar-WC[1].u)*(ustar+WC[1].p/(WC[1].d*(SL-WC[1].u))))); #ifdef WRADHYD FR[6]+=(fact*WC[1].dX/WC[1].d -UC[1].dX)*SL; #ifdef HELIUM FR[7]+=(fact*WC[1].dXHE/WC[1].d -UC[1].dXHE)*SL; FR[8]+=(fact*WC[1].dXXHE/WC[1].d -UC[1].dXXHE)*SL; #endif #endif } else if((ustar<=0.)&&(SR>0.)){ dgetflux_X(&UN[1],FR); fact=WN[1].d*(SR-WN[1].u)/(SR-ustar); FR[0]+=(fact*1. 
-UN[1].d )*SR; FR[1]+=(fact*ustar -UN[1].du)*SR; FR[2]+=(fact*WN[1].v -UN[1].dv)*SR; FR[3]+=(fact*WN[1].w -UN[1].dw)*SR; FR[4]+=(fact*(UN[1].E/UN[1].d+(ustar-WN[1].u)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].u))))-UN[1].E )*SR; Us.d =(fact*1.); Us.du=(fact*ustar); Us.dv=(fact*WN[1].v); Us.dw=(fact*WN[1].w); Us.E =(fact*(UN[1].E/UN[1].d+(ustar-WN[1].u)*(ustar+WN[1].p/(WN[1].d*(SR-WN[1].u))))); #ifdef WRADHYD FR[6]+=(fact*WN[1].dX/WN[1].d -UN[1].dX)*SR; #ifdef HELIUM FR[7]+=(fact*WN[1].dXHE/WN[1].d -UN[1].dXHE)*SR; FR[8]+=(fact*WN[1].dXXHE/WN[1].d -UN[1].dXXHE)*SR; #endif #endif } ebar=(Us.E-0.5*(Us.du*Us.du+Us.dv*Us.dv+Us.dw*Us.dw)/Us.d); divuloc=(GAMMA-1.)*(Us.du/Us.d)*eold; FR[5]=(Us.du/Us.d*ebar); divu+= divuloc; #endif //========================= copy the fluxes // Cancelling the fluxes from splitted neighbours for(iface=0;iface<NVAR;iface++) FL[iface]*=ffact[0]; for(iface=0;iface<NVAR;iface++) FR[iface]*=ffact[1]; memcpy(stencil[i].New.cell[icell].flux+0*NVAR,FL,sizeof(REAL)*NVAR); memcpy(stencil[i].New.cell[icell].flux+1*NVAR,FR,sizeof(REAL)*NVAR); stencil[i].New.cell[icell].divu=divu; //ready for the next oct } } } // ============================================================================================================== // ============================================================================================================== __global__ void dupdatefield(struct HGRID *stencil, int nread, int stride, struct CPUINFO *cpu, REAL dxcur, REAL dtnew) { int i,icell; struct Utype U; REAL one; int flx; REAL dtsurdx=dtnew/dxcur; REAL F[NFLUX]; unsigned int bx=blockIdx.x; unsigned int tx=threadIdx.x; i=bx*blockDim.x+tx; if(i<nread){ for(icell=0;icell<8;icell++){ // we scan the cells if(stencil[i].oct[13].cell[icell].split) continue; memcpy(F,stencil[i].New.cell[icell].flux,sizeof(REAL)*NFLUX);// New fluxes from the stencil // ==== updating // actually we compute and store the delta U only one=1.; memset(&U,0,sizeof(struct Utype)); // setting delta U for(flx=0;flx<6;flx++){ U.d +=F[0+flx*NVAR]*dtsurdx*one; U.du+=F[1+flx*NVAR]*dtsurdx*one; U.dv+=F[2+flx*NVAR]*dtsurdx*one; U.dw+=F[3+flx*NVAR]*dtsurdx*one; U.E +=F[4+flx*NVAR]*dtsurdx*one; U.eint+=F[5+flx*NVAR]*dtsurdx*one; #ifdef WRADHYD #ifndef NOADX U.dX+=F[6+flx*NVAR]*dtsurdx*one; #ifdef HELIUM U.dXHE+=F[7+flx*NVAR]*dtsurdx*one; U.dXXHE+=F[8+flx*NVAR]*dtsurdx*one; #endif #else U.dX+=0.; #ifdef HELIUM U.dXHE+=0.; U.dXXHE+=0.; #endif #endif #endif one*=-1.; } // scatter back the delta Uwithin the stencil memcpy(&(stencil[i].New.cell[icell].deltaU),&U,sizeof(struct Utype)); } } } // ======================================================= //======================================================================= //======================================================================= int advancehydroGPU(struct OCT **firstoct, int level, struct CPUINFO *cpu, struct HGRID *stencil, int stride, REAL dxcur, REAL dtnew){ struct OCT *nextoct; struct OCT *curoct; struct OCT *curoct0; int nreadtot,nread; /* REAL t[10]; */ /* REAL tg=0.,th=0.,tu=0.,ts=0.; */ int is; int offset; CUDA_CHECK_ERROR("Hydro start"); // --------------- setting the first oct of the level nextoct=firstoct[level-1]; nreadtot=0; int ng; int nt; cudaStream_t stream[cpu->nstream]; // Not fully regular expression int vnread[cpu->nstream]; // creating the streams for(is=0;is<cpu->nstream;is++){ cudaStreamCreate(&stream[is]); } // Calculations if((nextoct!=NULL)&&(cpu->noct[level-1]!=0)){ do { curoct0=nextoct; curoct=curoct0; //t[0]=MPI_Wtime(); #if 1 offset=0; // streaming 
==================== for(is=0;is<cpu->nstream;is++){ // ------------ gathering the stencil value values //printf("offser=%d\n",offset); curoct=nextoct; if(curoct!=NULL){ nextoct= gatherstencil(curoct,stencil+offset,stride/cpu->nstream,cpu, vnread+is); if(vnread[is]!=0){ ng=((vnread[is]-1)/cpu->nthread)+1; // +1 is for leftovers if(ng==1){ nt=vnread[is]; } else{ nt=cpu->nthread; } dim3 gridoct(ng); dim3 blockoct(nt); #ifndef NOCOMP cudaMemcpyAsync(cpu->hyd_stencil+offset,stencil+offset,vnread[is]*sizeof(struct HGRID),cudaMemcpyHostToDevice,stream[is]); //printf("Sweep hydro dt=%e dx=%e\n",dtnew,dxcur); //CUDA_CHECK_ERROR("Sweep hydro"); // ------------ solving the hydro dhydroM_sweepX<<<gridoct,blockoct,0,stream[is]>>>(cpu->hyd_stencil+offset,vnread[is],dxcur,dtnew); dhydroM_sweepY<<<gridoct,blockoct,0,stream[is]>>>(cpu->hyd_stencil+offset,vnread[is],dxcur,dtnew); dhydroM_sweepZ<<<gridoct,blockoct,0,stream[is]>>>(cpu->hyd_stencil+offset,vnread[is],dxcur,dtnew); //printf("Sweep hydro stop\n"); // ------------ updating values within the stencil dupdatefield<<<gridoct,blockoct,0,stream[is]>>>(cpu->hyd_stencil+offset,vnread[is],stride,cpu,dxcur,dtnew); cudaMemcpyAsync(stencil+offset,cpu->hyd_stencil+offset,vnread[is]*sizeof(struct HGRID),cudaMemcpyDeviceToHost,stream[is]); #endif offset+=vnread[is]; } } } #endif /* dev_updatefield<<<gridoct2,blockoct2>>>(cpu->hyd_stencil,nread,stride,cpu,dxcur,dtnew); */ cudaDeviceSynchronize(); // ------------ scatter back the FLUXES //cudaMemcpy(stencil,cpu->hyd_stencil,nread*sizeof(struct HGRID),cudaMemcpyDeviceToHost); nread=offset; nextoct=scatterstencil(curoct0,stencil, nread, cpu,dxcur,dtnew); //t[8]=MPI_Wtime(); nreadtot+=nread; /* ts+=(t[8]-t[6]); */ /* tu+=(t[6]-t[4]); */ /* th+=(t[4]-t[2]); */ /* tg+=(t[2]-t[0]); */ //printf("Start Error Hyd =%s nreadtot=%d\n",cudaGetErrorString(cudaGetLastError()),nreadtot); }while(nextoct!=NULL); } //printf("GPU | tgat=%e tcal=%e tup=%e tscat=%e\n",tg,th,tu,ts); // Destroying the streams for(is=0;is<cpu->nstream;is++){ cudaStreamDestroy(stream[is]); } // printf("Start Error Hyd =%s nreadtot=%d\n",cudaGetErrorString(cudaGetLastError()),nreadtot); CUDA_CHECK_ERROR("Hydro Stop"); return nreadtot; } #endif
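The sweep kernels above (dhydroM_sweepX/Y/Z) build their left/right interface states with dMUSCL_BOUND2, whose slopes are limited field by field in dminmod_W: beta = 1. gives the MINBEE/minmod limiter, beta = 2. SUPERBEE. As a hedged, standalone illustration of that limiter formula only — not of the MUSCL reconstruction or the HLLC solver — the minimal CUDA sketch below applies the same branch to scalar forward/backward differences; the names limit_slope and demo_limiter are illustrative and do not exist in the source above.

// --- Hedged illustration only: the slope-limiter branch used field-by-field in dminmod_W ---
// limit_slope() and demo_limiter() are illustrative names, not part of the code above.
#include <cstdio>

__device__ static double limit_slope(double dm, double dp, double beta)
{
  // dm: backward difference, dp: forward difference of one primitive field.
  // beta = 1. -> MINBEE/minmod, beta = 2. -> SUPERBEE (same convention as above).
  if (dp > 0.) {
    return fmax(fmax(0., fmin(beta * dm, dp)), fmin(dm, beta * dp));
  } else {
    return fmin(fmin(0., fmax(beta * dm, dp)), fmax(dm, beta * dp));
  }
}

__global__ void demo_limiter(const double *dm, const double *dp,
                             double *out, int n, double beta)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = limit_slope(dm[i], dp[i], beta);
}

int main(void)
{
  const int n = 3;
  double hm[n] = { 1.0, -1.0, 2.0 };  // backward differences
  double hp[n] = { 2.0,  2.0, 0.5 };  // forward differences
  double hout[n];
  double *dm, *dp, *dout;
  cudaMalloc(&dm, n * sizeof(double));
  cudaMalloc(&dp, n * sizeof(double));
  cudaMalloc(&dout, n * sizeof(double));
  cudaMemcpy(dm, hm, n * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(dp, hp, n * sizeof(double), cudaMemcpyHostToDevice);
  demo_limiter<<<1, n>>>(dm, dp, dout, n, 1.0);  // beta = 1 -> minmod behaviour
  cudaMemcpy(hout, dout, n * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    printf("dm=% .1f dp=% .1f -> limited slope % .2f\n", hm[i], hp[i], hout[i]);
  cudaFree(dm); cudaFree(dp); cudaFree(dout);
  return 0;
}

With beta = 1 the sketch returns 0 whenever the two differences have opposite signs and the smaller-magnitude difference otherwise, which is the behaviour the per-field branches in dminmod_W encode.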
1bee6f73fcd895969fa43176ebf97a9d7b18b112.hip
// !!! This is a file automatically generated by hipify!!! #include "distconv/tensor/tensor_mpi_cuda.hpp" #include "distconv/tensor/halo_cuda.hpp" #include "distconv/tensor/algorithms/transform_cuda.hpp" #include "distconv/util/util.hpp" #include "distconv/util/util_cuda.hpp" #include "distconv/util/util_mpi.hpp" #include <hip/hip_runtime.h> namespace distconv { namespace tensor { namespace internal { template <typename DataType> struct ClearHaloFunctor { using Vec2 = typename util::GetVectorType<DataType, 2>::type; using Vec4 = typename util::GetVectorType<DataType, 4>::type; static constexpr HaloTraversalOpGroup group = HaloTraversalOpGroup::THREAD; static constexpr bool has_pre_grid = false; static constexpr bool has_post_grid = false; static constexpr bool modifies_tensor = true; ClearHaloFunctor() {} __device__ void operator()(DataType &x, size_t) { x = DataType(0); } __device__ void operator()(Vec2 &x, size_t) { x.x = DataType(0); x.y = DataType(0); } __device__ void operator()(Vec4 &x, size_t) { x.x = DataType(0); x.y = DataType(0); x.z = DataType(0); x.w = DataType(0); } }; template <typename DataType> struct ScaleFunctor { ScaleFunctor(DataType v): m_v(v) {} __device__ void operator()(DataType &x) { x *= m_v; } DataType m_v; }; template <typename DataType1, typename DataType2> struct CastFuctor { __device__ void operator()(DataType1 &x, const DataType2 &y) { x = static_cast<DataType1>(y); } }; template <typename DataType1, typename DataType2> struct CastScaleBiasFuctor { CastScaleBiasFuctor(const DataType1 alpha, const DataType1 beta) : m_alpba(alpha), m_beta(beta) {} __device__ void operator()(DataType1 &x, const DataType2 &y) { x = m_alpba*static_cast<DataType1>(y)+m_beta; } DataType1 m_alpba, m_beta; }; } // namespace internal #define DEFINE_CLEAR_HALO(TYPE) \ template <> \ void TensorImplHelper<TYPE, HIPAllocator>::clear_halo(int dim, hipStream_t s) { \ TraverseHalo<TensorImplType::TensorType, internal::ClearHaloFunctor<TYPE>>( \ *(m_impl.get_tensor()), dim, false, internal::ClearHaloFunctor<TYPE>(), s); \ } DEFINE_CLEAR_HALO(float) DEFINE_CLEAR_HALO(double) DEFINE_CLEAR_HALO(int) #undef DEFINE_CLEAR_HALO #define DEFINE_SCALE(TYPE) \ template <> \ void TensorImplHelper<TYPE, HIPAllocator>::scale(TYPE v, hipStream_t s) { \ Transform(*(m_impl.get_tensor()), internal::ScaleFunctor<TYPE>(v), s); \ } DEFINE_SCALE(float) DEFINE_SCALE(double) DEFINE_SCALE(int) #undef DEFINE_SCALE #define DEFINE_CAST(T1, T2) \ template <> \ int Cast<T1, T2>(Tensor<T1, LocaleMPI, HIPAllocator> &t_dest, \ const Tensor<T2, LocaleMPI, HIPAllocator> &t_src, \ hipStream_t stream) { \ Transform(t_dest, t_src, internal::CastFuctor<T1, T2>(), \ stream); \ return 0; \ } DEFINE_CAST(float, short) DEFINE_CAST(float, unsigned short) #undef DEFINE_CAST #define DEFINE_CAST_SCALE_BIAS(T1, T2) \ template <> \ int CastScaleBias<T1, T2>(Tensor<T1, LocaleMPI, HIPAllocator> &t_dest, \ const Tensor<T2, LocaleMPI, HIPAllocator> &t_src, \ const T1 alpha, \ const T1 beta, \ hipStream_t stream) { \ Transform(t_dest, t_src, internal::CastScaleBiasFuctor<T1, T2>(alpha, beta), \ stream); \ return 0; \ } DEFINE_CAST_SCALE_BIAS(float, float) DEFINE_CAST_SCALE_BIAS(float, short) DEFINE_CAST_SCALE_BIAS(float, unsigned short) #undef DEFINE_CAST_SCALE_BIAS namespace internal { template <typename DataType1, typename DataType2> __device__ __forceinline__ void assign(DataType1 &t1, DataType2 &t2) { } template <typename DataType1, typename DataType2> __device__ __forceinline__ void assign(const DataType1 &t1, DataType2 &t2) { t2 = t1; } 
template <typename DataType1, typename DataType2> __device__ __forceinline__ void assign(DataType1 &t1, const DataType2 &t2) { t1 = t2; } template <int ND, int INNER_DIM, typename DataType1, typename DataType2, bool is_concat> __global__ void concat_or_slice_kernel( DataType1 *dst, Array<ND> dst_shape, Array<ND> dst_strides, DataType2 *src1, Array<ND> src1_shape, Array<ND> src1_strides, DataType2 *src2, Array<ND> src2_shape, Array<ND> src2_strides, int concat_dim) { // NOTE: For simplicity, dimension of concat_dim is assumed to be traversed by // different thread blocks. const int tid = threadIdx.x; int bid = blockIdx.x; const int block_size = blockDim.x; DataType2 *src = nullptr; Array<ND> src_strides; Array<ND> src_block_idx; Array<ND> dst_block_idx; #pragma unroll for (int i = INNER_DIM + 1; i < ND; ++i) { auto idx = bid % dst_shape[i]; dst_block_idx[i] = idx; bid /= dst_shape[i]; src_block_idx[i] = idx; if (i == concat_dim) { if (idx < src1_shape[i]) { src = src1; src_strides = src1_strides; } else { src = src2; src_strides = src2_strides; src_block_idx[i] = idx - src1_shape[i]; } } } #pragma unroll for (int i = INNER_DIM + 1; i < ND; ++i) { dst += dst_block_idx[i] * dst_strides[i]; src += src_block_idx[i] * src_strides[i]; } // Assume the region a thread block traverses has the same shape // between dst and src int inner_size = 1; #pragma unroll for (int i = 0; i <= INNER_DIM; ++i) { inner_size *= dst_shape[i]; } for (int inner_idx = tid; inner_idx < inner_size; inner_idx += block_size) { int dst_offset = 0; int src_offset = 0; int inner_idx_i = inner_idx; #pragma unroll for (int j = 0; j <= INNER_DIM; ++j) { int idx_j = inner_idx_i % dst_shape[j]; dst_offset += dst_strides[j] * idx_j; src_offset += src_strides[j] * idx_j; inner_idx_i /= dst_shape[j]; } assign(dst[dst_offset], src[src_offset]); } } template <bool B, typename T> struct AddConstIf; template <typename T> struct AddConstIf<true, T> { using type = typename std::add_const<T>::type; }; template <typename T> struct AddConstIf<false, T> { using type = T; }; template <typename DataType, bool IS_CONCAT> int ConcatenateOrSlice( typename AddConstIf<!IS_CONCAT, Tensor<DataType, LocaleMPI, HIPAllocator>>::type &t_dest, typename AddConstIf<IS_CONCAT, Tensor<DataType, LocaleMPI, HIPAllocator>>::type &t_src1, typename AddConstIf<IS_CONCAT, Tensor<DataType, LocaleMPI, HIPAllocator>>::type &t_src2, hipStream_t s) { const int nd = t_dest.get_num_dims(); int block_dim = 256; // tunable int concat_dim = -1; for (int i = 0; i < nd; ++i) { auto dest_dim = t_dest.get_shape()[i]; auto src1_dim = t_src1.get_shape()[i]; auto src2_dim = t_src2.get_shape()[i]; if (dest_dim == src1_dim && dest_dim == src2_dim) { // this is not concat dim continue; } assert_always(dest_dim == src1_dim + src2_dim); concat_dim = i; break; } // TODO: only works for U-Net. 
Concat on channel dim assert_always(concat_dim == nd - 2); using DataType1 = typename AddConstIf<!IS_CONCAT, DataType>::type; using DataType2 = typename AddConstIf<IS_CONCAT, DataType>::type; #define CALL_KERNEL(ND, INNER_DIM) do { \ assert_always(concat_dim > INNER_DIM); \ int grid_dim = 1; \ for (int i = INNER_DIM + 1; i < ND; ++i) { \ grid_dim *= t_dest.get_local_shape()[i]; \ } \ hipLaunchKernelGGL(( concat_or_slice_kernel<ND, INNER_DIM, DataType1, DataType2, IS_CONCAT>) \ , dim3(grid_dim), dim3(block_dim), 0, s, \ t_dest.get_base_ptr(), Array<ND>(t_dest.get_local_shape()), \ Array<ND>(t_dest.get_strides()), \ t_src1.get_base_ptr(), Array<ND>(t_src1.get_local_shape()), \ Array<ND>(t_src1.get_strides()), \ t_src2.get_base_ptr(), Array<ND>(t_src2.get_local_shape()), \ Array<ND>(t_src2.get_strides()), \ concat_dim); \ } while (0) switch (nd) { case 3: CALL_KERNEL(3, 1); break; case 4: CALL_KERNEL(4, 1); break; case 5: // Needs more robust tuning CALL_KERNEL(5, 1); break; default: throw std::exception(); } #undef CALL_KERNEL return 0; } } // namespace internal template <typename DataType> int Concatenate(Tensor<DataType, LocaleMPI, HIPAllocator> &t_dest, const Tensor<DataType, LocaleMPI, HIPAllocator> &t_src1, const Tensor<DataType, LocaleMPI, HIPAllocator> &t_src2, hipStream_t s) { return internal::ConcatenateOrSlice<DataType, true>( t_dest, t_src1, t_src2, s); } template <typename DataType> int Slice(Tensor<DataType, LocaleMPI, HIPAllocator> &t_dest1, Tensor<DataType, LocaleMPI, HIPAllocator> &t_dest2, const Tensor<DataType, LocaleMPI, HIPAllocator> &t_src, hipStream_t s) { return internal::ConcatenateOrSlice<DataType, false>( t_src, t_dest1, t_dest2, s); } #define DEFINE_CONCATENATE(TYPE) \ template \ int Concatenate<TYPE>(Tensor<TYPE, LocaleMPI, HIPAllocator> &t_dest, \ const Tensor<TYPE, LocaleMPI, HIPAllocator> &t_src1, \ const Tensor<TYPE, LocaleMPI, HIPAllocator> &t_src2, \ hipStream_t s); \ template \ int Slice<TYPE>(Tensor<TYPE, LocaleMPI, HIPAllocator> &t_dest1, \ Tensor<TYPE, LocaleMPI, HIPAllocator> &t_dest2, \ const Tensor<TYPE, LocaleMPI, HIPAllocator> &t_src, \ hipStream_t s); DEFINE_CONCATENATE(float) DEFINE_CONCATENATE(double) DEFINE_CONCATENATE(int) DEFINE_CONCATENATE(long) #undef DEFINE_CONCATENATE } // namespace tensor } // namespace distconv
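In this file pair the hipified version above differs from the CUDA original that follows mainly in the runtime header (hip/hip_runtime.h vs cuda_runtime.h), the renamed types and allocator (hipStream_t/HIPAllocator vs cudaStream_t/CUDAAllocator), and the kernel-launch form: the CALL_KERNEL macro above uses hipLaunchKernelGGL where the .cu file uses the triple-chevron syntax. The short CUDA sketch below shows the two launch forms side by side, with the HIP form appearing only as a comment; the kernel scale_kernel and its arguments are illustrative and are not part of distconv.

// --- Hedged illustration only: launch-syntax rewrite performed by hipify ---
// scale_kernel is an illustrative kernel, not part of the library above.
#include <cstdio>

__global__ void scale_kernel(float *x, float alpha, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= alpha;
}

int main(void)
{
  const int n = 8;
  float h[n];
  for (int i = 0; i < n; i++) h[i] = (float)i;
  float *d;
  cudaStream_t s;
  cudaStreamCreate(&s);
  cudaMalloc(&d, n * sizeof(float));
  cudaMemcpyAsync(d, h, n * sizeof(float), cudaMemcpyHostToDevice, s);

  // CUDA source form (what the .cu file below uses):
  scale_kernel<<<1, 32, 0, s>>>(d, 2.0f, n);

  // After hipification the same launch becomes (HIP form, shown only as a comment):
  //   hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(32), 0, s, d, 2.0f, n);

  cudaMemcpyAsync(h, d, n * sizeof(float), cudaMemcpyDeviceToHost, s);
  cudaStreamSynchronize(s);
  for (int i = 0; i < n; i++) printf("%g ", h[i]);
  printf("\n");
  cudaFree(d);
  cudaStreamDestroy(s);
  return 0;
}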
1bee6f73fcd895969fa43176ebf97a9d7b18b112.cu
#include "distconv/tensor/tensor_mpi_cuda.hpp" #include "distconv/tensor/halo_cuda.hpp" #include "distconv/tensor/algorithms/transform_cuda.hpp" #include "distconv/util/util.hpp" #include "distconv/util/util_cuda.hpp" #include "distconv/util/util_mpi.hpp" #include <cuda_runtime.h> namespace distconv { namespace tensor { namespace internal { template <typename DataType> struct ClearHaloFunctor { using Vec2 = typename util::GetVectorType<DataType, 2>::type; using Vec4 = typename util::GetVectorType<DataType, 4>::type; static constexpr HaloTraversalOpGroup group = HaloTraversalOpGroup::THREAD; static constexpr bool has_pre_grid = false; static constexpr bool has_post_grid = false; static constexpr bool modifies_tensor = true; ClearHaloFunctor() {} __device__ void operator()(DataType &x, size_t) { x = DataType(0); } __device__ void operator()(Vec2 &x, size_t) { x.x = DataType(0); x.y = DataType(0); } __device__ void operator()(Vec4 &x, size_t) { x.x = DataType(0); x.y = DataType(0); x.z = DataType(0); x.w = DataType(0); } }; template <typename DataType> struct ScaleFunctor { ScaleFunctor(DataType v): m_v(v) {} __device__ void operator()(DataType &x) { x *= m_v; } DataType m_v; }; template <typename DataType1, typename DataType2> struct CastFuctor { __device__ void operator()(DataType1 &x, const DataType2 &y) { x = static_cast<DataType1>(y); } }; template <typename DataType1, typename DataType2> struct CastScaleBiasFuctor { CastScaleBiasFuctor(const DataType1 alpha, const DataType1 beta) : m_alpba(alpha), m_beta(beta) {} __device__ void operator()(DataType1 &x, const DataType2 &y) { x = m_alpba*static_cast<DataType1>(y)+m_beta; } DataType1 m_alpba, m_beta; }; } // namespace internal #define DEFINE_CLEAR_HALO(TYPE) \ template <> \ void TensorImplHelper<TYPE, CUDAAllocator>::clear_halo(int dim, cudaStream_t s) { \ TraverseHalo<TensorImplType::TensorType, internal::ClearHaloFunctor<TYPE>>( \ *(m_impl.get_tensor()), dim, false, internal::ClearHaloFunctor<TYPE>(), s); \ } DEFINE_CLEAR_HALO(float) DEFINE_CLEAR_HALO(double) DEFINE_CLEAR_HALO(int) #undef DEFINE_CLEAR_HALO #define DEFINE_SCALE(TYPE) \ template <> \ void TensorImplHelper<TYPE, CUDAAllocator>::scale(TYPE v, cudaStream_t s) { \ Transform(*(m_impl.get_tensor()), internal::ScaleFunctor<TYPE>(v), s); \ } DEFINE_SCALE(float) DEFINE_SCALE(double) DEFINE_SCALE(int) #undef DEFINE_SCALE #define DEFINE_CAST(T1, T2) \ template <> \ int Cast<T1, T2>(Tensor<T1, LocaleMPI, CUDAAllocator> &t_dest, \ const Tensor<T2, LocaleMPI, CUDAAllocator> &t_src, \ cudaStream_t stream) { \ Transform(t_dest, t_src, internal::CastFuctor<T1, T2>(), \ stream); \ return 0; \ } DEFINE_CAST(float, short) DEFINE_CAST(float, unsigned short) #undef DEFINE_CAST #define DEFINE_CAST_SCALE_BIAS(T1, T2) \ template <> \ int CastScaleBias<T1, T2>(Tensor<T1, LocaleMPI, CUDAAllocator> &t_dest, \ const Tensor<T2, LocaleMPI, CUDAAllocator> &t_src, \ const T1 alpha, \ const T1 beta, \ cudaStream_t stream) { \ Transform(t_dest, t_src, internal::CastScaleBiasFuctor<T1, T2>(alpha, beta), \ stream); \ return 0; \ } DEFINE_CAST_SCALE_BIAS(float, float) DEFINE_CAST_SCALE_BIAS(float, short) DEFINE_CAST_SCALE_BIAS(float, unsigned short) #undef DEFINE_CAST_SCALE_BIAS namespace internal { template <typename DataType1, typename DataType2> __device__ __forceinline__ void assign(DataType1 &t1, DataType2 &t2) { } template <typename DataType1, typename DataType2> __device__ __forceinline__ void assign(const DataType1 &t1, DataType2 &t2) { t2 = t1; } template <typename DataType1, typename DataType2> 
__device__ __forceinline__ void assign(DataType1 &t1, const DataType2 &t2) { t1 = t2; } template <int ND, int INNER_DIM, typename DataType1, typename DataType2, bool is_concat> __global__ void concat_or_slice_kernel( DataType1 *dst, Array<ND> dst_shape, Array<ND> dst_strides, DataType2 *src1, Array<ND> src1_shape, Array<ND> src1_strides, DataType2 *src2, Array<ND> src2_shape, Array<ND> src2_strides, int concat_dim) { // NOTE: For simplicity, dimension of concat_dim is assumed to be traversed by // different thread blocks. const int tid = threadIdx.x; int bid = blockIdx.x; const int block_size = blockDim.x; DataType2 *src = nullptr; Array<ND> src_strides; Array<ND> src_block_idx; Array<ND> dst_block_idx; #pragma unroll for (int i = INNER_DIM + 1; i < ND; ++i) { auto idx = bid % dst_shape[i]; dst_block_idx[i] = idx; bid /= dst_shape[i]; src_block_idx[i] = idx; if (i == concat_dim) { if (idx < src1_shape[i]) { src = src1; src_strides = src1_strides; } else { src = src2; src_strides = src2_strides; src_block_idx[i] = idx - src1_shape[i]; } } } #pragma unroll for (int i = INNER_DIM + 1; i < ND; ++i) { dst += dst_block_idx[i] * dst_strides[i]; src += src_block_idx[i] * src_strides[i]; } // Assume the region a thread block traverses has the same shape // between dst and src int inner_size = 1; #pragma unroll for (int i = 0; i <= INNER_DIM; ++i) { inner_size *= dst_shape[i]; } for (int inner_idx = tid; inner_idx < inner_size; inner_idx += block_size) { int dst_offset = 0; int src_offset = 0; int inner_idx_i = inner_idx; #pragma unroll for (int j = 0; j <= INNER_DIM; ++j) { int idx_j = inner_idx_i % dst_shape[j]; dst_offset += dst_strides[j] * idx_j; src_offset += src_strides[j] * idx_j; inner_idx_i /= dst_shape[j]; } assign(dst[dst_offset], src[src_offset]); } } template <bool B, typename T> struct AddConstIf; template <typename T> struct AddConstIf<true, T> { using type = typename std::add_const<T>::type; }; template <typename T> struct AddConstIf<false, T> { using type = T; }; template <typename DataType, bool IS_CONCAT> int ConcatenateOrSlice( typename AddConstIf<!IS_CONCAT, Tensor<DataType, LocaleMPI, CUDAAllocator>>::type &t_dest, typename AddConstIf<IS_CONCAT, Tensor<DataType, LocaleMPI, CUDAAllocator>>::type &t_src1, typename AddConstIf<IS_CONCAT, Tensor<DataType, LocaleMPI, CUDAAllocator>>::type &t_src2, cudaStream_t s) { const int nd = t_dest.get_num_dims(); int block_dim = 256; // tunable int concat_dim = -1; for (int i = 0; i < nd; ++i) { auto dest_dim = t_dest.get_shape()[i]; auto src1_dim = t_src1.get_shape()[i]; auto src2_dim = t_src2.get_shape()[i]; if (dest_dim == src1_dim && dest_dim == src2_dim) { // this is not concat dim continue; } assert_always(dest_dim == src1_dim + src2_dim); concat_dim = i; break; } // TODO: only works for U-Net. 
Concat on channel dim assert_always(concat_dim == nd - 2); using DataType1 = typename AddConstIf<!IS_CONCAT, DataType>::type; using DataType2 = typename AddConstIf<IS_CONCAT, DataType>::type; #define CALL_KERNEL(ND, INNER_DIM) do { \ assert_always(concat_dim > INNER_DIM); \ int grid_dim = 1; \ for (int i = INNER_DIM + 1; i < ND; ++i) { \ grid_dim *= t_dest.get_local_shape()[i]; \ } \ concat_or_slice_kernel<ND, INNER_DIM, DataType1, DataType2, IS_CONCAT> \ <<<grid_dim, block_dim, 0, s>>>( \ t_dest.get_base_ptr(), Array<ND>(t_dest.get_local_shape()), \ Array<ND>(t_dest.get_strides()), \ t_src1.get_base_ptr(), Array<ND>(t_src1.get_local_shape()), \ Array<ND>(t_src1.get_strides()), \ t_src2.get_base_ptr(), Array<ND>(t_src2.get_local_shape()), \ Array<ND>(t_src2.get_strides()), \ concat_dim); \ } while (0) switch (nd) { case 3: CALL_KERNEL(3, 1); break; case 4: CALL_KERNEL(4, 1); break; case 5: // Needs more robust tuning CALL_KERNEL(5, 1); break; default: throw std::exception(); } #undef CALL_KERNEL return 0; } } // namespace internal template <typename DataType> int Concatenate(Tensor<DataType, LocaleMPI, CUDAAllocator> &t_dest, const Tensor<DataType, LocaleMPI, CUDAAllocator> &t_src1, const Tensor<DataType, LocaleMPI, CUDAAllocator> &t_src2, cudaStream_t s) { return internal::ConcatenateOrSlice<DataType, true>( t_dest, t_src1, t_src2, s); } template <typename DataType> int Slice(Tensor<DataType, LocaleMPI, CUDAAllocator> &t_dest1, Tensor<DataType, LocaleMPI, CUDAAllocator> &t_dest2, const Tensor<DataType, LocaleMPI, CUDAAllocator> &t_src, cudaStream_t s) { return internal::ConcatenateOrSlice<DataType, false>( t_src, t_dest1, t_dest2, s); } #define DEFINE_CONCATENATE(TYPE) \ template \ int Concatenate<TYPE>(Tensor<TYPE, LocaleMPI, CUDAAllocator> &t_dest, \ const Tensor<TYPE, LocaleMPI, CUDAAllocator> &t_src1, \ const Tensor<TYPE, LocaleMPI, CUDAAllocator> &t_src2, \ cudaStream_t s); \ template \ int Slice<TYPE>(Tensor<TYPE, LocaleMPI, CUDAAllocator> &t_dest1, \ Tensor<TYPE, LocaleMPI, CUDAAllocator> &t_dest2, \ const Tensor<TYPE, LocaleMPI, CUDAAllocator> &t_src, \ cudaStream_t s); DEFINE_CONCATENATE(float) DEFINE_CONCATENATE(double) DEFINE_CONCATENATE(int) DEFINE_CONCATENATE(long) #undef DEFINE_CONCATENATE } // namespace tensor } // namespace distconv
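Editorial note: the distconv file above applies device functors elementwise through a generic Transform call (e.g. Transform(*(m_impl.get_tensor()), internal::ScaleFunctor<TYPE>(v), s)). As a rough, self-contained sketch of that functor-over-elements idiom — transform_kernel, ScaleBy and the launch configuration below are illustrative stand-ins I introduce here, not the distconv Transform API:

#include <cuda_runtime.h>
#include <cstdio>

// Illustrative elementwise transform: applies functor f to every element of data.
template <typename T, typename Func>
__global__ void transform_kernel(T *data, size_t n, Func f) {
  size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (i < n) f(data[i]);
}

// Hypothetical scale functor, mirroring the shape of internal::ScaleFunctor above.
template <typename T>
struct ScaleBy {
  T v;
  __device__ void operator()(T &x) const { x *= v; }
};

int main() {
  const size_t n = 1 << 20;
  float *d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));         // dummy data for the sketch
  const int block = 256;
  const int grid = (int)((n + block - 1) / block);
  transform_kernel<<<grid, block>>>(d, n, ScaleBy<float>{2.0f});
  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}

Packaging the operation as a small struct with a __device__ operator() is what lets the same Transform machinery serve scaling, casting, and the cast-scale-bias variant in the file above; only the functor changes.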
04346be4cd418ad1beb2cbf381cbac1f77635fdd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void copy_sort_int( const int *orig, const unsigned int *sort_idx, const unsigned int nitems, int *sorted ) { for( int i = 0; i < nitems; ++ i ) { sorted[sort_idx[i]] = orig[i]; } }
04346be4cd418ad1beb2cbf381cbac1f77635fdd.cu
#include "includes.h" __global__ void copy_sort_int( const int *orig, const unsigned int *sort_idx, const unsigned int nitems, int *sorted ) { for( int i = 0; i < nitems; ++ i ) { sorted[sort_idx[i]] = orig[i]; } }
5ab4640ef1d7d7bc80bb82d4c1e3fc08c8d1ee12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> // assert() is only supported // for devices of compute capability 2.0 and higher #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #undef assert #define assert(arg) #endif __global__ void testAssert(void) { int is_one = 1; int should_be_one = 0; // This will have no effect assert(is_one); // This will halt kernel execution assert(should_be_one); } int main(int argc, char* argv[]) { hipLaunchKernelGGL(( testAssert), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); return 0; }
5ab4640ef1d7d7bc80bb82d4c1e3fc08c8d1ee12.cu
#include <assert.h> // assert() is only supported // for devices of compute capability 2.0 and higher #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #undef assert #define assert(arg) #endif __global__ void testAssert(void) { int is_one = 1; int should_be_one = 0; // This will have no effect assert(is_one); // This will halt kernel execution assert(should_be_one); } int main(int argc, char* argv[]) { testAssert<<<1,1>>>(); cudaDeviceSynchronize(); return 0; }
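Editorial note: this pair mainly shows hipify's launch rewrite — testAssert<<<1,1>>>() becomes hipLaunchKernelGGL((testAssert), dim3(1), dim3(1), 0, 0, ), and cudaDeviceSynchronize() becomes hipDeviceSynchronize(). The original program does not report the failure; a hedged sketch of the CUDA version with error reporting added (the error handling is my addition, not taken from the file):

#include <cassert>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void testAssert(void) {
  int should_be_one = 0;
  assert(should_be_one);  // fires and halts the kernel
}

int main() {
  testAssert<<<1, 1>>>();
  // A device-side assert surfaces as an error on the next synchronizing call.
  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess)
    fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
  return err == cudaSuccess ? 0 : 1;
}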
08f564a5132aa457e67287aa82e3634d8a9762ec.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "pybind11/stl.h" #include "contrib_ops/rocm/bert/batched_gemm_softmax_gemm_permute_pipelines.cuh" #include "core/providers/rocm/tunable/rocm_tunable.h" #include "python/tools/kernel_explorer/device_array.h" #include "python/tools/kernel_explorer/kernel_explorer_interface.h" #include <vector> namespace py = pybind11; using namespace onnxruntime::contrib::rocm; namespace onnxruntime { template <typename T> class IGemmSoftmaxGemmPermuteKernelExplorer : public IKernelExplorer { public: IGemmSoftmaxGemmPermuteKernelExplorer( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) { ROCBLAS_CALL_THROW(rocblas_create_handle(&rocblas_handle_)); attn_.batch_size = batch; attn_.sequence_length = seqlen; attn_.kv_sequence_length = seqlen; // NOTE: not used attn_.past_sequence_length = 0; attn_.original_past_sequence_length = 0; // NOTE: not used attn_.total_sequence_length = total_seqlen; attn_.max_sequence_length = 0; attn_.hidden_size = num_heads * head_size; attn_.head_size = head_size; attn_.v_hidden_size = attn_.hidden_size; // Q,K,V hidden size must agree now attn_.v_head_size = attn_.head_size; // Q,K,V hidden size must agree now attn_.num_heads = num_heads; attn_.is_unidirectional = false; attn_.past_present_share_buffer = false; attn_.do_rotary = false; attn_.mask_filter_value = -10000.0f; attn_.scale = scale; if (mask_dim == 0) { attn_.mask_type = contrib::MASK_NONE; } else if (mask_dim == 2) { attn_.mask_type = contrib::MASK_2D_KEY_PADDING; } else if (mask_dim == 3) { attn_.mask_type = contrib::MASK_3D_ATTENTION; } else if (mask_dim == 4) { attn_.mask_type = contrib::MASK_4D_MEGATRON; } else { ORT_ENFORCE(false, "mask type not supported"); } device_prop = GetEp()->GetDeviceProp(); params_.tuning_ctx = TuningContext(); params_.stream = Stream(); params_.handle = rocblas_handle_; params_.attention = &attn_; params_.device_prop = &device_prop; params_.scale = scale; params_.q_buffer = reinterpret_cast<T*>(Q.ptr()); params_.k_buffer = reinterpret_cast<T*>(K.ptr()); params_.v_buffer = reinterpret_cast<T*>(V.ptr()); if (attn_bias.has_value()) { params_.bias_buffer = reinterpret_cast<T*>(attn_bias->ptr()); } if (attn_mask.has_value()) { params_.mask_index_buffer = reinterpret_cast<int*>(attn_mask->ptr()); if (mask_dim == 2) { params_.mask_index_dims = {batch, total_seqlen}; } else if (mask_dim == 3) { params_.mask_index_dims = {batch, seqlen, total_seqlen}; } else if (mask_dim == 4) { ORT_ENFORCE(max_seqlen.has_value()); attn_.max_sequence_length = max_seqlen.value(); ORT_ENFORCE(attn_.max_sequence_length >= seqlen); attn_.past_sequence_length = attn_.max_sequence_length - seqlen; params_.mask_index_dims = {batch, 1, attn_.max_sequence_length, attn_.max_sequence_length}; } } params_.out_buffer = reinterpret_cast<T*>(out.ptr()); } ~IGemmSoftmaxGemmPermuteKernelExplorer() { ROCBLAS_CALL_THROW(rocblas_destroy_handle(rocblas_handle_)); } void SetWorkspace(size_t num_bytes) { void* ptr; HIP_CALL_THROW(hipMalloc(&ptr, num_bytes)); workspace_.reset(ptr, [](void* ptr) { HIP_CALL_THROW(hipFree(ptr)); }); params_.workspace_buffer = reinterpret_cast<T*>(workspace_.get()); } protected: using ParamsT = 
contrib::rocm::GemmSoftmaxGemmPermuteParams<T>; rocblas_handle rocblas_handle_; hipDeviceProp_t device_prop; contrib::AttentionParameters attn_; ParamsT params_; std::shared_ptr<void> workspace_; }; // The pipeline composed from rocblas api calls and kernel launches. template <typename T> class GemmSoftmaxGemmPermuteGeneric : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteGeneric( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_)); } std::vector<std::string> ListOps() const { return {"Generic"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteGenericPipeline<T>::Run( &this->params_, /*use_persistent_softmax=*/false)); } }; #ifdef USE_COMPOSABLE_KERNEL template <typename T, bool USE_BIAS, bool USE_MASK> class GemmSoftmaxGemmPermuteCK : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteCK( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_)); for (auto&& [ts, op] : GetCKGemmSoftmaxGemmPermuteTypeStringAndOps<T, USE_BIAS, USE_MASK>()) { type_strings_.emplace_back(std::move(ts)); ops_.emplace_back(std::move(op)); } } std::vector<std::string> ListOps() const { return type_strings_; } bool SelectOp(const std::string& name) { for (size_t i = 0; i < ops_.size(); i++) { if (type_strings_[i] == name) { selected_op_ = i; Status status = ops_[i].IsSupported(&this->params_); return status.IsOK(); } } ORT_THROW("Cannot find implementation ", name); } void Run() override { ORT_THROW_IF_ERROR(ops_[selected_op_](&this->params_)); } private: using ParamsT = typename IGemmSoftmaxGemmPermuteKernelExplorer<T>::ParamsT; using OpT = Op<ParamsT>; std::vector<OpT> ops_; std::vector<std::string> type_strings_; size_t selected_op_{}; }; #endif // USE_COMPOSABLE_KERNEL // The pipeline composed from rocblas api calls and kernel launches. 
template <typename T> class GemmSoftmaxGemmPermuteTunable : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteTunable( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(::max( GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_), GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_))); this->params_.TuningContext()->EnableTunableOpAndTuning(); } std::vector<std::string> ListOps() const { return {"Tunable"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteTunableOp<T>{}(&this->params_)); } }; #define REGISTER_COMMON(name, type, ...) \ py::class_<type<__VA_ARGS__>>(m, name) \ .def(py::init<int64_t, int64_t, int64_t, std::optional<int64_t>, int64_t, int64_t, int64_t, \ float, \ DeviceArray&, \ DeviceArray&, \ DeviceArray&, \ std::optional<DeviceArray>&, \ std::optional<DeviceArray>&, \ DeviceArray&>()) \ .def("SetRepeats", &type<__VA_ARGS__>::SetRepeats) \ .def("Run", &type<__VA_ARGS__>::Run) \ .def("Profile", &type<__VA_ARGS__>::Profile) \ .def("ListOps", &type<__VA_ARGS__>::ListOps) \ .def("SelectOp", &type<__VA_ARGS__>::SelectOp); #define REGISTER_GENERIC(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteGeneric_" #dtype, GemmSoftmaxGemmPermuteGeneric, dtype) #define REGISTER_CK(dtype, biased, masked, mask_bias_suffix) \ REGISTER_COMMON( \ "GemmSoftmaxGemmPermuteCK" mask_bias_suffix "_" #dtype, GemmSoftmaxGemmPermuteCK, dtype, biased, masked) #define REGISTER_TUNABLE(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteTunable_" #dtype, GemmSoftmaxGemmPermuteTunable, dtype) KE_REGISTER(m) { REGISTER_GENERIC(half); #ifdef USE_COMPOSABLE_KERNEL REGISTER_CK(half, false, false, ""); REGISTER_CK(half, true, false, "Biased"); REGISTER_CK(half, false, true, "Masked"); REGISTER_CK(half, true, true, "BiasedMasked"); #endif REGISTER_TUNABLE(half); } } // namespace onnxruntime
08f564a5132aa457e67287aa82e3634d8a9762ec.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "pybind11/stl.h" #include "contrib_ops/rocm/bert/batched_gemm_softmax_gemm_permute_pipelines.cuh" #include "core/providers/rocm/tunable/rocm_tunable.h" #include "python/tools/kernel_explorer/device_array.h" #include "python/tools/kernel_explorer/kernel_explorer_interface.h" #include <vector> namespace py = pybind11; using namespace onnxruntime::contrib::rocm; namespace onnxruntime { template <typename T> class IGemmSoftmaxGemmPermuteKernelExplorer : public IKernelExplorer { public: IGemmSoftmaxGemmPermuteKernelExplorer( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) { ROCBLAS_CALL_THROW(rocblas_create_handle(&rocblas_handle_)); attn_.batch_size = batch; attn_.sequence_length = seqlen; attn_.kv_sequence_length = seqlen; // NOTE: not used attn_.past_sequence_length = 0; attn_.original_past_sequence_length = 0; // NOTE: not used attn_.total_sequence_length = total_seqlen; attn_.max_sequence_length = 0; attn_.hidden_size = num_heads * head_size; attn_.head_size = head_size; attn_.v_hidden_size = attn_.hidden_size; // Q,K,V hidden size must agree now attn_.v_head_size = attn_.head_size; // Q,K,V hidden size must agree now attn_.num_heads = num_heads; attn_.is_unidirectional = false; attn_.past_present_share_buffer = false; attn_.do_rotary = false; attn_.mask_filter_value = -10000.0f; attn_.scale = scale; if (mask_dim == 0) { attn_.mask_type = contrib::MASK_NONE; } else if (mask_dim == 2) { attn_.mask_type = contrib::MASK_2D_KEY_PADDING; } else if (mask_dim == 3) { attn_.mask_type = contrib::MASK_3D_ATTENTION; } else if (mask_dim == 4) { attn_.mask_type = contrib::MASK_4D_MEGATRON; } else { ORT_ENFORCE(false, "mask type not supported"); } device_prop = GetEp()->GetDeviceProp(); params_.tuning_ctx = TuningContext(); params_.stream = Stream(); params_.handle = rocblas_handle_; params_.attention = &attn_; params_.device_prop = &device_prop; params_.scale = scale; params_.q_buffer = reinterpret_cast<T*>(Q.ptr()); params_.k_buffer = reinterpret_cast<T*>(K.ptr()); params_.v_buffer = reinterpret_cast<T*>(V.ptr()); if (attn_bias.has_value()) { params_.bias_buffer = reinterpret_cast<T*>(attn_bias->ptr()); } if (attn_mask.has_value()) { params_.mask_index_buffer = reinterpret_cast<int*>(attn_mask->ptr()); if (mask_dim == 2) { params_.mask_index_dims = {batch, total_seqlen}; } else if (mask_dim == 3) { params_.mask_index_dims = {batch, seqlen, total_seqlen}; } else if (mask_dim == 4) { ORT_ENFORCE(max_seqlen.has_value()); attn_.max_sequence_length = max_seqlen.value(); ORT_ENFORCE(attn_.max_sequence_length >= seqlen); attn_.past_sequence_length = attn_.max_sequence_length - seqlen; params_.mask_index_dims = {batch, 1, attn_.max_sequence_length, attn_.max_sequence_length}; } } params_.out_buffer = reinterpret_cast<T*>(out.ptr()); } ~IGemmSoftmaxGemmPermuteKernelExplorer() { ROCBLAS_CALL_THROW(rocblas_destroy_handle(rocblas_handle_)); } void SetWorkspace(size_t num_bytes) { void* ptr; HIP_CALL_THROW(hipMalloc(&ptr, num_bytes)); workspace_.reset(ptr, [](void* ptr) { HIP_CALL_THROW(hipFree(ptr)); }); params_.workspace_buffer = reinterpret_cast<T*>(workspace_.get()); } protected: using ParamsT = contrib::rocm::GemmSoftmaxGemmPermuteParams<T>; rocblas_handle 
rocblas_handle_; hipDeviceProp_t device_prop; contrib::AttentionParameters attn_; ParamsT params_; std::shared_ptr<void> workspace_; }; // The pipeline composed from rocblas api calls and kernel launches. template <typename T> class GemmSoftmaxGemmPermuteGeneric : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteGeneric( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_)); } std::vector<std::string> ListOps() const { return {"Generic"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteGenericPipeline<T>::Run( &this->params_, /*use_persistent_softmax=*/false)); } }; #ifdef USE_COMPOSABLE_KERNEL template <typename T, bool USE_BIAS, bool USE_MASK> class GemmSoftmaxGemmPermuteCK : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteCK( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_)); for (auto&& [ts, op] : GetCKGemmSoftmaxGemmPermuteTypeStringAndOps<T, USE_BIAS, USE_MASK>()) { type_strings_.emplace_back(std::move(ts)); ops_.emplace_back(std::move(op)); } } std::vector<std::string> ListOps() const { return type_strings_; } bool SelectOp(const std::string& name) { for (size_t i = 0; i < ops_.size(); i++) { if (type_strings_[i] == name) { selected_op_ = i; Status status = ops_[i].IsSupported(&this->params_); return status.IsOK(); } } ORT_THROW("Cannot find implementation ", name); } void Run() override { ORT_THROW_IF_ERROR(ops_[selected_op_](&this->params_)); } private: using ParamsT = typename IGemmSoftmaxGemmPermuteKernelExplorer<T>::ParamsT; using OpT = Op<ParamsT>; std::vector<OpT> ops_; std::vector<std::string> type_strings_; size_t selected_op_{}; }; #endif // USE_COMPOSABLE_KERNEL // The pipeline composed from rocblas api calls and kernel launches. 
template <typename T> class GemmSoftmaxGemmPermuteTunable : public IGemmSoftmaxGemmPermuteKernelExplorer<T> { public: GemmSoftmaxGemmPermuteTunable( int64_t batch, int64_t seqlen, int64_t total_seqlen, std::optional<int64_t> max_seqlen, int64_t num_heads, int64_t head_size, int64_t mask_dim, double scale, DeviceArray& Q, DeviceArray& K, DeviceArray& V, std::optional<DeviceArray>& attn_bias, std::optional<DeviceArray>& attn_mask, DeviceArray& out) : IGemmSoftmaxGemmPermuteKernelExplorer<T>(batch, seqlen, total_seqlen, max_seqlen, num_heads, head_size, mask_dim, scale, Q, K, V, attn_bias, attn_mask, out) { this->SetWorkspace(std::max( GemmSoftmaxGemmPermuteGenericPipeline<T>::GetWorkspaceNumBytes(&this->attn_), GemmSoftmaxGemmPermuteTunableOp<T>::GetWorkspaceNumBytes(&this->attn_))); this->params_.TuningContext()->EnableTunableOpAndTuning(); } std::vector<std::string> ListOps() const { return {"Tunable"}; } bool SelectOp(const std::string&) { return true; } void Run() override { ORT_THROW_IF_ERROR(GemmSoftmaxGemmPermuteTunableOp<T>{}(&this->params_)); } }; #define REGISTER_COMMON(name, type, ...) \ py::class_<type<__VA_ARGS__>>(m, name) \ .def(py::init<int64_t, int64_t, int64_t, std::optional<int64_t>, int64_t, int64_t, int64_t, \ float, \ DeviceArray&, \ DeviceArray&, \ DeviceArray&, \ std::optional<DeviceArray>&, \ std::optional<DeviceArray>&, \ DeviceArray&>()) \ .def("SetRepeats", &type<__VA_ARGS__>::SetRepeats) \ .def("Run", &type<__VA_ARGS__>::Run) \ .def("Profile", &type<__VA_ARGS__>::Profile) \ .def("ListOps", &type<__VA_ARGS__>::ListOps) \ .def("SelectOp", &type<__VA_ARGS__>::SelectOp); #define REGISTER_GENERIC(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteGeneric_" #dtype, GemmSoftmaxGemmPermuteGeneric, dtype) #define REGISTER_CK(dtype, biased, masked, mask_bias_suffix) \ REGISTER_COMMON( \ "GemmSoftmaxGemmPermuteCK" mask_bias_suffix "_" #dtype, GemmSoftmaxGemmPermuteCK, dtype, biased, masked) #define REGISTER_TUNABLE(dtype) \ REGISTER_COMMON("GemmSoftmaxGemmPermuteTunable_" #dtype, GemmSoftmaxGemmPermuteTunable, dtype) KE_REGISTER(m) { REGISTER_GENERIC(half); #ifdef USE_COMPOSABLE_KERNEL REGISTER_CK(half, false, false, ""); REGISTER_CK(half, true, false, "Biased"); REGISTER_CK(half, false, true, "Masked"); REGISTER_CK(half, true, true, "BiasedMasked"); #endif REGISTER_TUNABLE(half); } } // namespace onnxruntime
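Editorial note: SetWorkspace in the pair above hands a raw device allocation to a std::shared_ptr whose custom deleter calls the runtime free, so the scratch buffer is released automatically when the explorer object is destroyed. A stripped-down sketch of that ownership pattern in plain CUDA (make_device_workspace and the 1 MiB size are mine, not part of the onnxruntime code):

#include <cuda_runtime.h>
#include <memory>
#include <stdexcept>

// Allocate num_bytes of device memory owned by a shared_ptr; freed automatically.
inline std::shared_ptr<void> make_device_workspace(size_t num_bytes) {
  void *ptr = nullptr;
  if (cudaMalloc(&ptr, num_bytes) != cudaSuccess)
    throw std::runtime_error("cudaMalloc failed");
  return std::shared_ptr<void>(ptr, [](void *p) { cudaFree(p); });
}

int main() {
  auto workspace = make_device_workspace(1 << 20);          // 1 MiB scratch buffer
  float *scratch = static_cast<float *>(workspace.get());   // view as float*
  (void)scratch;  // a real caller would pass this pointer to its kernels
  return 0;       // cudaFree runs when the last shared_ptr reference is destroyed
}

Using shared_ptr<void> rather than a unique_ptr keeps the buffer alive for as long as anything (here, params_.workspace_buffer) still refers to the owning handle, which matches how the explorer classes above share the workspace with their parameter structs.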
816528914f8c3b307fbe5e9e57f0c77ba88b422d.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <parboil.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include "util.h" __global__ void histo_prescan_kernel ( unsigned int* input, int size, unsigned int* minmax); __global__ void histo_main_kernel ( uchar4 *sm_mappings, unsigned int num_elements, unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow); __global__ void histo_intermediates_kernel ( uint2 *input, unsigned int height, unsigned int width, unsigned int input_pitch, uchar4 *sm_mappings); __global__ void histo_final_kernel ( unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow, unsigned int *final_histo); /****************************************************************************** * Implementation: GPU * Details: * in the GPU implementation of histogram, we begin by computing the span of the * input values into the histogram. Then the histogramming computation is carried * out by a (BLOCK_X, BLOCK_Y) sized grid, where every group of Y (same X) * computes its own partial histogram for a part of the input, and every Y in the * group exclusively writes to a portion of the span computed in the beginning. * Finally, a reduction is performed to combine all the partial histograms into * the final result. 
******************************************************************************/ int main(int argc, char* argv[]) { //struct pb_TimerSet timers; //struct pb_Parameters *parameters; //parameters = pb_ReadParameters(&argc, argv); //if (!parameters) // return -1; /*if(!parameters->inpFiles[0]){ fputs("Input file expected\n", stderr); return -1; }*/ char *prescans = "PreScanKernel"; char *postpremems = "PostPreMems"; char *intermediates = "IntermediatesKernel"; char *mains = "MainKernel"; char *finals = "FinalKernel"; //pb_InitializeTimerSet(&timers); //pb_AddSubTimer(&timers, prescans, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, postpremems, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, intermediates, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, mains, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, finals, pb_TimerID_KERNEL); //pb_SwitchToTimer(&timers, pb_TimerID_IO); int numIterations = 1; /*if (argc >= 2){ numIterations = atoi(argv[1]); } else { fputs("Expected at least one command line argument\n", stderr); return -1; }*/ unsigned int img_width, img_height; unsigned int histo_width, histo_height; //parameters->inpFiles[0] = "/home/peng/PPoPP14-GKLEE-Test/Parboil/parboil/datasets/histo/default/input/img.bin"; //FILE* f = fopen(parameters->inpFiles[0],"rb"); FILE* f = fopen("/home/peng/PPoPP14-GKLEE-Test/Parboil/parboil/datasets/histo/default/input/img.bin","rb"); int result = 0; result += fread(&img_width, sizeof(unsigned int), 1, f); result += fread(&img_height, sizeof(unsigned int), 1, f); result += fread(&histo_width, sizeof(unsigned int), 1, f); result += fread(&histo_height, sizeof(unsigned int), 1, f); if (result != 4) { fputs("Error reading input and output dimensions from file\n", stderr); return -1; } printf("img_width: %u, img_height: %u, histo_width: %u, histo_height: %u \n", img_width, img_height, histo_width, histo_height); unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int)); unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char)); result = fread(img, sizeof(unsigned int), img_width*img_height, f); fclose(f); if (result != img_width*img_height){ fputs("Error reading input array from file\n", stderr); return -1; } int even_width = ((img_width+1)/2)*2; unsigned int* input; unsigned int* ranges; uchar4* sm_mappings; unsigned int* global_subhisto; unsigned short* global_histo; unsigned int* global_overflow; unsigned char* final_histo; hipMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int)); hipMalloc((void**)&ranges , 2*sizeof(unsigned int)); hipMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4)); hipMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int)); hipMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short)); hipMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int)); hipMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char)); hipMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char)); for (int y=0; y < img_height; y++){ hipMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), hipMemcpyHostToDevice); } //pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); for (int iter = 0; iter < numIterations; iter++) { unsigned int ranges_h[2] = {UINT32_MAX, 0}; hipMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), hipMemcpyHostToDevice); //pb_SwitchToSubTimer(&timers, prescans , pb_TimerID_KERNEL); #ifdef 
_PRESCAN #ifdef _SYM klee_make_symbolic(ranges, 2*sizeof(unsigned int), "ranges_input"); #endif hipLaunchKernelGGL(( histo_prescan_kernel), dim3(dim3(PRESCAN_BLOCKS_X)),dim3(dim3(PRESCAN_THREADS)), 0, 0, (unsigned int*)input, img_height*img_width, ranges); //pb_SwitchToSubTimer(&timers, postpremems , pb_TimerID_KERNEL); hipMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), hipMemcpyDeviceToHost); #endif hipMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int)); //pb_SwitchToSubTimer(&timers, intermediates, pb_TimerID_KERNEL); #ifdef _INTERMEDIATE #ifdef _SYM klee_make_symbolic(sm_mappings, img_width*img_height*sizeof(uchar4), "sm_mappings_input"); klee_make_symbolic(global_histo, img_width*histo_height*sizeof(unsigned short), "global_histo_input"); #endif hipLaunchKernelGGL(( histo_intermediates_kernel), dim3(dim3((img_height + UNROLL-1)/UNROLL)), dim3(dim3((img_width+1)/2)), 0, 0, (uint2*)(input), (unsigned int)img_height, (unsigned int)img_width, (img_width+1)/2, (uchar4*)(sm_mappings) ); //pb_SwitchToSubTimer(&timers, mains, pb_TimerID_KERNEL); hipLaunchKernelGGL(( histo_main_kernel), dim3(dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1)), dim3(dim3(THREADS)), 0, 0, (uchar4*)(sm_mappings), img_height*img_width, ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow) ); #endif //pb_SwitchToSubTimer(&timers, finals, pb_TimerID_KERNEL); // no arguments ... #ifdef _FINAL printf("img_width*histo_height*sizeof(unsigned short): %u \n", img_width*histo_height*sizeof(unsigned short)); hipLaunchKernelGGL(( histo_final_kernel), dim3(dim3(BLOCK_X*3)), dim3(dim3(512)), 0, 0, ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow), (unsigned int*)(final_histo) ); #endif } //pb_SwitchToTimer(&timers, pb_TimerID_IO); hipMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), hipMemcpyDeviceToHost); hipFree(input); hipFree(ranges); hipFree(sm_mappings); hipFree(global_subhisto); hipFree(global_histo); hipFree(global_overflow); hipFree(final_histo); /*if (parameters->outFile) { dump_histo_img(histo, histo_height, histo_width, parameters->outFile); }*/ //pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); free(img); free(histo); //pb_SwitchToTimer(&timers, pb_TimerID_NONE); printf("\n"); //pb_PrintTimerSet(&timers); //pb_FreeParameters(parameters); //pb_DestroyTimerSet(&timers); return 0; }
816528914f8c3b307fbe5e9e57f0c77ba88b422d.cu
/************************************************************************* * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <parboil.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include "util.h" __global__ void histo_prescan_kernel ( unsigned int* input, int size, unsigned int* minmax); __global__ void histo_main_kernel ( uchar4 *sm_mappings, unsigned int num_elements, unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow); __global__ void histo_intermediates_kernel ( uint2 *input, unsigned int height, unsigned int width, unsigned int input_pitch, uchar4 *sm_mappings); __global__ void histo_final_kernel ( unsigned int sm_range_min, unsigned int sm_range_max, unsigned int histo_height, unsigned int histo_width, unsigned int *global_subhisto, unsigned int *global_histo, unsigned int *global_overflow, unsigned int *final_histo); /****************************************************************************** * Implementation: GPU * Details: * in the GPU implementation of histogram, we begin by computing the span of the * input values into the histogram. Then the histogramming computation is carried * out by a (BLOCK_X, BLOCK_Y) sized grid, where every group of Y (same X) * computes its own partial histogram for a part of the input, and every Y in the * group exclusively writes to a portion of the span computed in the beginning. * Finally, a reduction is performed to combine all the partial histograms into * the final result. ******************************************************************************/ int main(int argc, char* argv[]) { //struct pb_TimerSet timers; //struct pb_Parameters *parameters; //parameters = pb_ReadParameters(&argc, argv); //if (!parameters) // return -1; /*if(!parameters->inpFiles[0]){ fputs("Input file expected\n", stderr); return -1; }*/ char *prescans = "PreScanKernel"; char *postpremems = "PostPreMems"; char *intermediates = "IntermediatesKernel"; char *mains = "MainKernel"; char *finals = "FinalKernel"; //pb_InitializeTimerSet(&timers); //pb_AddSubTimer(&timers, prescans, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, postpremems, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, intermediates, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, mains, pb_TimerID_KERNEL); //pb_AddSubTimer(&timers, finals, pb_TimerID_KERNEL); //pb_SwitchToTimer(&timers, pb_TimerID_IO); int numIterations = 1; /*if (argc >= 2){ numIterations = atoi(argv[1]); } else { fputs("Expected at least one command line argument\n", stderr); return -1; }*/ unsigned int img_width, img_height; unsigned int histo_width, histo_height; //parameters->inpFiles[0] = "/home/peng/PPoPP14-GKLEE-Test/Parboil/parboil/datasets/histo/default/input/img.bin"; //FILE* f = fopen(parameters->inpFiles[0],"rb"); FILE* f = fopen("/home/peng/PPoPP14-GKLEE-Test/Parboil/parboil/datasets/histo/default/input/img.bin","rb"); int result = 0; result += fread(&img_width, sizeof(unsigned int), 1, f); result += fread(&img_height, sizeof(unsigned int), 1, f); result += fread(&histo_width, sizeof(unsigned int), 1, f); result += fread(&histo_height, sizeof(unsigned int), 1, f); if (result != 4) { fputs("Error reading input and output dimensions from file\n", stderr); return -1; } printf("img_width: %u, img_height: %u, histo_width: 
%u, histo_height: %u \n", img_width, img_height, histo_width, histo_height); unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int)); unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char)); result = fread(img, sizeof(unsigned int), img_width*img_height, f); fclose(f); if (result != img_width*img_height){ fputs("Error reading input array from file\n", stderr); return -1; } int even_width = ((img_width+1)/2)*2; unsigned int* input; unsigned int* ranges; uchar4* sm_mappings; unsigned int* global_subhisto; unsigned short* global_histo; unsigned int* global_overflow; unsigned char* final_histo; cudaMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int)); cudaMalloc((void**)&ranges , 2*sizeof(unsigned int)); cudaMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4)); cudaMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int)); cudaMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short)); cudaMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int)); cudaMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char)); cudaMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char)); for (int y=0; y < img_height; y++){ cudaMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), cudaMemcpyHostToDevice); } //pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); for (int iter = 0; iter < numIterations; iter++) { unsigned int ranges_h[2] = {UINT32_MAX, 0}; cudaMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), cudaMemcpyHostToDevice); //pb_SwitchToSubTimer(&timers, prescans , pb_TimerID_KERNEL); #ifdef _PRESCAN #ifdef _SYM klee_make_symbolic(ranges, 2*sizeof(unsigned int), "ranges_input"); #endif histo_prescan_kernel<<<dim3(PRESCAN_BLOCKS_X),dim3(PRESCAN_THREADS)>>>((unsigned int*)input, img_height*img_width, ranges); //pb_SwitchToSubTimer(&timers, postpremems , pb_TimerID_KERNEL); cudaMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), cudaMemcpyDeviceToHost); #endif cudaMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int)); //pb_SwitchToSubTimer(&timers, intermediates, pb_TimerID_KERNEL); #ifdef _INTERMEDIATE #ifdef _SYM klee_make_symbolic(sm_mappings, img_width*img_height*sizeof(uchar4), "sm_mappings_input"); klee_make_symbolic(global_histo, img_width*histo_height*sizeof(unsigned short), "global_histo_input"); #endif histo_intermediates_kernel<<<dim3((img_height + UNROLL-1)/UNROLL), dim3((img_width+1)/2)>>>( (uint2*)(input), (unsigned int)img_height, (unsigned int)img_width, (img_width+1)/2, (uchar4*)(sm_mappings) ); //pb_SwitchToSubTimer(&timers, mains, pb_TimerID_KERNEL); histo_main_kernel<<<dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1), dim3(THREADS)>>>( (uchar4*)(sm_mappings), img_height*img_width, ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow) ); #endif //pb_SwitchToSubTimer(&timers, finals, pb_TimerID_KERNEL); // no arguments ... 
#ifdef _FINAL printf("img_width*histo_height*sizeof(unsigned short): %u \n", img_width*histo_height*sizeof(unsigned short)); histo_final_kernel<<<dim3(BLOCK_X*3), dim3(512)>>>( ranges_h[0], ranges_h[1], histo_height, histo_width, (unsigned int*)(global_subhisto), (unsigned int*)(global_histo), (unsigned int*)(global_overflow), (unsigned int*)(final_histo) ); #endif } //pb_SwitchToTimer(&timers, pb_TimerID_IO); cudaMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaFree(input); cudaFree(ranges); cudaFree(sm_mappings); cudaFree(global_subhisto); cudaFree(global_histo); cudaFree(global_overflow); cudaFree(final_histo); /*if (parameters->outFile) { dump_histo_img(histo, histo_height, histo_width, parameters->outFile); }*/ //pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); free(img); free(histo); //pb_SwitchToTimer(&timers, pb_TimerID_NONE); printf("\n"); //pb_PrintTimerSet(&timers); //pb_FreeParameters(parameters); //pb_DestroyTimerSet(&timers); return 0; }
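Editorial note: the comment block in this pair describes the usual privatized-histogram strategy — first find the span of the input values, then have each block accumulate its own partial histogram, and finally reduce the partials into the result. A heavily simplified, self-contained sketch of that idea (fixed 256 bins, atomics for the merge; this is my illustration of the general technique, not the Parboil prescan/intermediates/main/final kernels shown above):

#include <cuda_runtime.h>
#include <cstdio>

#define NUM_BINS 256

// Each block builds a private histogram in shared memory, then merges it
// into the global histogram with one atomicAdd per bin.
__global__ void histo_privatized(const unsigned char *data, int n,
                                 unsigned int *global_histo) {
  __shared__ unsigned int local[NUM_BINS];
  for (int b = threadIdx.x; b < NUM_BINS; b += blockDim.x) local[b] = 0;
  __syncthreads();

  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x)
    atomicAdd(&local[data[i]], 1u);
  __syncthreads();

  for (int b = threadIdx.x; b < NUM_BINS; b += blockDim.x)
    atomicAdd(&global_histo[b], local[b]);
}

int main() {
  const int n = 1 << 20;
  unsigned char *d_data;
  unsigned int *d_histo;
  cudaMalloc(&d_data, n);
  cudaMalloc(&d_histo, NUM_BINS * sizeof(unsigned int));
  cudaMemset(d_data, 7, n);                              // dummy input: every byte is 7
  cudaMemset(d_histo, 0, NUM_BINS * sizeof(unsigned int));

  histo_privatized<<<64, 256>>>(d_data, n, d_histo);

  unsigned int h_histo[NUM_BINS];
  cudaMemcpy(h_histo, d_histo, sizeof(h_histo), cudaMemcpyDeviceToHost);
  printf("bin 7 = %u (expected %d)\n", h_histo[7], n);
  cudaFree(d_data);
  cudaFree(d_histo);
  return 0;
}

The Parboil version avoids most global atomics by having each block write exclusively to its slice of the value span computed in the prescan, which is why it needs the separate final reduction kernel; the sketch trades that sophistication for brevity.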
8b63bd99c0d7b00d5f2ad96d2687359aa48bc836.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dpra_general.cuh" #include "helper_cuda.h" #include "device_launch_parameters.h" namespace DPRA{ /*---------------------------------------CUDA Kernels----------------------------------*/ /* PURPOSE: Load the image into its padded version (0's at boundary) INPUTS: d_in_img: unpadded image iImgWidth, iImgHeihgt: image size iPaddedWidth, iPaddedHeight: padded size OUTPUTS: d_out_img_Padded: padded image */ __global__ void load_img_padding_kernel(uchar *d_out_img_Padded, const uchar *d_in_img, int iImgWidth, int iImgHeight, int iPaddedWidth, int iPaddedHeight) { const int y = threadIdx.y + blockIdx.y * blockDim.y; const int x = threadIdx.x + blockIdx.x * blockDim.x; int idImg = (y - 1)*iImgWidth + x - 1; int idPadded = y*iPaddedWidth + x; if (y < iPaddedHeight && x < iPaddedWidth) { if (y > 0 && y < iPaddedHeight - 1 && x>0 && x < iPaddedWidth - 1) { d_out_img_Padded[idPadded] = d_in_img[idImg]; } else { d_out_img_Padded[idPadded] = 0; } } } /* PURPOSE: Pre-compute the cos(phi) and sin(phi) with padding of 0 at boundary INPUTS: d_in_Phi: the ref phi iWidth, iHeight: iWidth = iImgWidth +2, iHeight = iImgWidth +2 */ __global__ void compute_cosPhi_sinPhi_kernel(float *d_out_cosPhi, float *d_out_sinPhi, float *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + blockIdx.y * blockDim.y; const int x = threadIdx.x + blockIdx.x * blockDim.x; int idPadded = y*iPaddedWidth + x; int idImg = (y - 1)*iWidth + x - 1; if (y < iPaddedHeight && x < iPaddedWidth) { if (y > 0 && y < iPaddedHeight - 1 && x > 0 && x < iPaddedWidth - 1) { float tempPhi = d_in_Phi[idImg]; d_out_cosPhi[idPadded] = cos(tempPhi); d_out_sinPhi[idPadded] = sin(tempPhi); } else { d_out_cosPhi[idPadded] = 0; d_out_sinPhi[idPadded] = 0; } } } /* PURPOSE: Pre-compute the cos(phi) and sin(phi) with padding of 0 at boundary INPUTS: d_in_Phi: the ref phi iWidth, iHeight: iWidth = iImgWidth +2, iHeight = iImgWidth +2 */ __global__ void compute_cosPhi_sinPhi_double_kernel(double *d_out_cosPhi, double *d_out_sinPhi, double *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + blockIdx.y * blockDim.y; const int x = threadIdx.x + blockIdx.x * blockDim.x; int idPadded = y*iPaddedWidth + x; int idImg = (y - 1)*iWidth + x - 1; if (y < iPaddedHeight && x < iPaddedWidth) { if (y > 0 && y < iPaddedHeight - 1 && x > 0 && x < iPaddedWidth - 1) { double tempPhi = d_in_Phi[idImg]; d_out_cosPhi[idPadded] = cos(tempPhi); d_out_sinPhi[idPadded] = sin(tempPhi); } else { d_out_cosPhi[idPadded] = 0; d_out_sinPhi[idPadded] = 0; } } } /* PURPOSE: Generate all matrix A and b for each pixel on GPU INPUTS: d_in_imgPadded: padded image d_in_cosPhi: padded cosPhi d_in_sinPhi: padded sinPhi iImgWidth, iImgHeight: image size iPaddedWidth, iPaddedHeight: padded size OUTPUTS: d_out_A: matrix A d_out_b: vector b */ __global__ void generate_A_b_kernel(float *d_out_A, float *d_out_b, const uchar *d_in_imgPadded, const float *d_in_cosphi, const float *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + (BLOCK_SIZE_16 - 2) * blockIdx.y; const int x = threadIdx.x + (BLOCK_SIZE_16 - 2) * blockIdx.x; float sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; float sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; // Global Memory offset: every block 
actually begin with 2 overlapped pixels __shared__ float cos_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ float sin_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ uchar img_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; // Load the global mem to shared mem if (y < iPaddedHeight && x < iPaddedWidth) { cos_phi_sh[threadIdx.y][threadIdx.x] = d_in_cosphi[y*iPaddedWidth + x]; sin_phi_sh[threadIdx.y][threadIdx.x] = d_in_sinphi[y*iPaddedWidth + x]; img_sh[threadIdx.y][threadIdx.x] = d_in_imgPadded[y*iPaddedWidth + x]; } __syncthreads(); // Compute the results within the boundary if (y >= 1 && y < iPaddedHeight - 1 && x >= 1 && x < iPaddedWidth - 1 && threadIdx.x != 0 && threadIdx.x != BLOCK_SIZE_16 - 1 && threadIdx.y != 0 && threadIdx.y != BLOCK_SIZE_16 - 1) { int idA = ((y - 1)*iImgWidth + x - 1) * 9; int idb = ((y - 1)*iImgWidth + x - 1) * 3; sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; for (int i = threadIdx.y - 1; i <= threadIdx.y + 1; i++) { for (int j = threadIdx.x - 1; j <= threadIdx.x + 1; j++) { float cos_phi = cos_phi_sh[i][j]; float sin_phi = sin_phi_sh[i][j]; float ft = static_cast<float>(img_sh[i][j]); // Elements of A sum_cos += cos_phi; sum_sin += sin_phi; sum_sincos += cos_phi * sin_phi; sum_sin2 += sin_phi*sin_phi; sum_cos2 += cos_phi*cos_phi; // Elements of b sum_ft += ft; sum_ft_cos += ft * cos_phi; sum_ft_sin += ft * sin_phi; } } d_out_A[idA + 0] = 9; d_out_A[idA + 1] = 0; d_out_A[idA + 2] = 0; d_out_A[idA + 3] = sum_cos; d_out_A[idA + 4] = sum_cos2; d_out_A[idA + 5] = 0; d_out_A[idA + 6] = sum_sin; d_out_A[idA + 7] = sum_sincos; d_out_A[idA + 8] = sum_sin2; d_out_b[idb + 0] = sum_ft; d_out_b[idb + 1] = sum_ft_cos; d_out_b[idb + 2] = sum_ft_sin; } } /* PURPOSE: Generate all matrix A and b for each pixel on GPU INPUTS: d_in_imgPadded: padded image d_in_cosPhi: padded cosPhi d_in_sinPhi: padded sinPhi iImgWidth, iImgHeight: image size iPaddedWidth, iPaddedHeight: padded size OUTPUTS: d_out_A: matrix A d_out_b: vector b */ __global__ void generate_A_b_double_kernel(double *d_out_A, double *d_out_b, const uchar *d_in_imgPadded, const double *d_in_cosphi, const double *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + (BLOCK_SIZE_16 - 2) * blockIdx.y; const int x = threadIdx.x + (BLOCK_SIZE_16 - 2) * blockIdx.x; double sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; double sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; // Global Memory offset: every block actually begin with 2 overlapped pixels __shared__ double cos_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ double sin_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ uchar img_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; // Load the global mem to shared mem if (y < iPaddedHeight && x < iPaddedWidth) { cos_phi_sh[threadIdx.y][threadIdx.x] = d_in_cosphi[y*iPaddedWidth + x]; sin_phi_sh[threadIdx.y][threadIdx.x] = d_in_sinphi[y*iPaddedWidth + x]; img_sh[threadIdx.y][threadIdx.x] = d_in_imgPadded[y*iPaddedWidth + x]; } __syncthreads(); // Compute the results within the boundary if (y >= 1 && y < iPaddedHeight - 1 && x >= 1 && x < iPaddedWidth - 1 && threadIdx.x != 0 && threadIdx.x != BLOCK_SIZE_16 - 1 && threadIdx.y != 0 && threadIdx.y != BLOCK_SIZE_16 - 1) { int idA = ((y - 1)*iImgWidth + x - 1) * 9; int idb = ((y - 1)*iImgWidth + x - 1) * 3; sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; for (int i = 
threadIdx.y - 1; i <= threadIdx.y + 1; i++) { for (int j = threadIdx.x - 1; j <= threadIdx.x + 1; j++) { double cos_phi = cos_phi_sh[i][j]; double sin_phi = sin_phi_sh[i][j]; double ft = static_cast<double>(img_sh[i][j]); // Elements of A sum_cos += cos_phi; sum_sin += sin_phi; sum_sincos += cos_phi * sin_phi; sum_sin2 += sin_phi*sin_phi; sum_cos2 += cos_phi*cos_phi; // Elements of b sum_ft += ft; sum_ft_cos += ft * cos_phi; sum_ft_sin += ft * sin_phi; } } d_out_A[idA + 0] = 9; d_out_A[idA + 1] = 0; d_out_A[idA + 2] = 0; d_out_A[idA + 3] = sum_cos; d_out_A[idA + 4] = sum_cos2; d_out_A[idA + 5] = 0; d_out_A[idA + 6] = sum_sin; d_out_A[idA + 7] = sum_sincos; d_out_A[idA + 8] = sum_sin2; d_out_b[idb + 0] = sum_ft; d_out_b[idb + 1] = sum_ft_cos; d_out_b[idb + 2] = sum_ft_sin; } } /* PURPOSE: Get the current & deltaPhi on device */ __global__ void get_deltaPhi_currPhi_kernel(float *d_out_deltaPhi, float *d_out_currPhi, float *d_in_dphiRef, float *d_in_refPhi, hipfftComplex *d_in_filtered, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { float temp = atan2f(d_in_filtered[i].y, d_in_filtered[i].x); float tempRefPhi = d_in_refPhi[i]; d_out_deltaPhi[i] = d_in_dphiRef[i] + temp; d_out_currPhi[i] = atan2f(sinf(temp + tempRefPhi), cos(temp + tempRefPhi)); } } /* PURPOSE: Get the current & deltaPhi on device */ __global__ void get_deltaPhi_currPhi_double_kernel(double *d_out_deltaPhi, double *d_out_currPhi, double *d_in_dphiRef, double *d_in_refPhi, hipfftDoubleComplex *d_in_filtered, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { double temp = atan2f(d_in_filtered[i].y, d_in_filtered[i].x); double tempRefPhi = d_in_refPhi[i]; d_out_deltaPhi[i] = d_in_dphiRef[i] + temp; d_out_currPhi[i] = atan2f(sinf(temp + tempRefPhi), cos(temp + tempRefPhi)); } } __global__ void update_dphiRef_kernel(float *d_out_dphiRef, const float *d_in_dphi, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { d_out_dphiRef[i] += d_in_dphi[i]; } } __global__ void update_dphiRef_double_kernel(double *d_out_dphiRef, const double *d_in_dphi, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { d_out_dphiRef[i] += d_in_dphi[i]; } } /*--------------------------------------End CUDA Kernels--------------------------------*/ void load_img_padding(uchar *d_out_img_Padded, const uchar *d_in_img, int iImgWidth, int iImgHeight, int iPaddedWidth, int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { hipLaunchKernelGGL(( load_img_padding_kernel), dim3(blocks), dim3(threads), 0, 0, d_out_img_Padded, d_in_img, iImgWidth, iImgHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("load_img_padding_kernel launch failed!"); } void compute_cosPhi_sinPhi(float *d_out_cosPhi, float *d_out_sinPhi, float *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { hipLaunchKernelGGL(( compute_cosPhi_sinPhi_kernel), dim3(blocks), dim3(threads), 0, 0, d_out_cosPhi, d_out_sinPhi, d_in_Phi, iWidth, iHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("compute_cosPhi_sinPhi_kernel launch failed!"); } void compute_cosPhi_sinPhi(double *d_out_cosPhi, double *d_out_sinPhi, double *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { 
hipLaunchKernelGGL(( compute_cosPhi_sinPhi_double_kernel), dim3(blocks), dim3(threads), 0, 0, d_out_cosPhi, d_out_sinPhi, d_in_Phi, iWidth, iHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("compute_cosPhi_sinPhi_double_kernel launch failed!"); } void get_A_b(float *d_out_A, float *d_out_b, const uchar *d_in_imgPadded, const float *d_in_cosphi, const float *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { hipLaunchKernelGGL(( generate_A_b_kernel), dim3(blocks), dim3(threads), 0, 0, d_out_A, d_out_b, d_in_imgPadded, d_in_cosphi, d_in_sinphi, iImgWidth, iImgHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("generate_A_b_kernel launch failed!"); } void get_A_b(double *d_out_A, double *d_out_b, const uchar *d_in_imgPadded, const double *d_in_cosphi, const double *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { hipLaunchKernelGGL(( generate_A_b_double_kernel), dim3(blocks), dim3(threads), 0, 0, d_out_A, d_out_b, d_in_imgPadded, d_in_cosphi, d_in_sinphi, iImgWidth, iImgHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("generate_A_b_double_kernel launch failed!"); } void get_deltaPhi_currPhi(float *d_out_deltaPhi, float *d_out_currPhi, float *d_in_dphiRef, float *d_in_refPhi, hipfftComplex *d_in_filtered, const int iSize) { hipLaunchKernelGGL(( get_deltaPhi_currPhi_kernel), dim3(8*32), dim3(256), 0, 0, d_out_deltaPhi, d_out_currPhi, d_in_dphiRef, d_in_refPhi, d_in_filtered, iSize); getLastCudaError("get_deltaPhi_currPhi_kernel launch failed!"); } void get_deltaPhi_currPhi(double *d_out_deltaPhi, double *d_out_currPhi, double *d_in_dphiRef, double *d_in_refPhi, hipfftDoubleComplex *d_in_filtered, const int iSize) { hipLaunchKernelGGL(( get_deltaPhi_currPhi_double_kernel), dim3(8*32), dim3(256), 0, 0, d_out_deltaPhi, d_out_currPhi, d_in_dphiRef, d_in_refPhi, d_in_filtered, iSize); getLastCudaError("get_deltaPhi_currPhi_double_kernel launch failed!"); } void update_dphiRef(float *d_out_dphiRef, const float *d_in_dphi, const int iSize) { hipLaunchKernelGGL(( update_dphiRef_kernel), dim3(8*32), dim3(256), 0, 0, d_out_dphiRef, d_in_dphi, iSize); getLastCudaError("update_dphiRef_kernel launch failed!"); } void update_dphiRef(double *d_out_dphiRef, const double *d_in_dphi, const int iSize) { hipLaunchKernelGGL(( update_dphiRef_double_kernel), dim3(8*32), dim3(256), 0, 0, d_out_dphiRef, d_in_dphi, iSize); getLastCudaError("update_dphiRef_double_kernel launch failed!"); } } // namespace DPRA
8b63bd99c0d7b00d5f2ad96d2687359aa48bc836.cu
#include "dpra_general.cuh" #include "helper_cuda.h" #include "device_launch_parameters.h" namespace DPRA{ /*---------------------------------------CUDA Kernels----------------------------------*/ /* PURPOSE: Load the image into its padded version (0's at boundary) INPUTS: d_in_img: unpadded image iImgWidth, iImgHeihgt: image size iPaddedWidth, iPaddedHeight: padded size OUTPUTS: d_out_img_Padded: padded image */ __global__ void load_img_padding_kernel(uchar *d_out_img_Padded, const uchar *d_in_img, int iImgWidth, int iImgHeight, int iPaddedWidth, int iPaddedHeight) { const int y = threadIdx.y + blockIdx.y * blockDim.y; const int x = threadIdx.x + blockIdx.x * blockDim.x; int idImg = (y - 1)*iImgWidth + x - 1; int idPadded = y*iPaddedWidth + x; if (y < iPaddedHeight && x < iPaddedWidth) { if (y > 0 && y < iPaddedHeight - 1 && x>0 && x < iPaddedWidth - 1) { d_out_img_Padded[idPadded] = d_in_img[idImg]; } else { d_out_img_Padded[idPadded] = 0; } } } /* PURPOSE: Pre-compute the cos(phi) and sin(phi) with padding of 0 at boundary INPUTS: d_in_Phi: the ref phi iWidth, iHeight: iWidth = iImgWidth +2, iHeight = iImgWidth +2 */ __global__ void compute_cosPhi_sinPhi_kernel(float *d_out_cosPhi, float *d_out_sinPhi, float *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + blockIdx.y * blockDim.y; const int x = threadIdx.x + blockIdx.x * blockDim.x; int idPadded = y*iPaddedWidth + x; int idImg = (y - 1)*iWidth + x - 1; if (y < iPaddedHeight && x < iPaddedWidth) { if (y > 0 && y < iPaddedHeight - 1 && x > 0 && x < iPaddedWidth - 1) { float tempPhi = d_in_Phi[idImg]; d_out_cosPhi[idPadded] = cos(tempPhi); d_out_sinPhi[idPadded] = sin(tempPhi); } else { d_out_cosPhi[idPadded] = 0; d_out_sinPhi[idPadded] = 0; } } } /* PURPOSE: Pre-compute the cos(phi) and sin(phi) with padding of 0 at boundary INPUTS: d_in_Phi: the ref phi iWidth, iHeight: iWidth = iImgWidth +2, iHeight = iImgWidth +2 */ __global__ void compute_cosPhi_sinPhi_double_kernel(double *d_out_cosPhi, double *d_out_sinPhi, double *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + blockIdx.y * blockDim.y; const int x = threadIdx.x + blockIdx.x * blockDim.x; int idPadded = y*iPaddedWidth + x; int idImg = (y - 1)*iWidth + x - 1; if (y < iPaddedHeight && x < iPaddedWidth) { if (y > 0 && y < iPaddedHeight - 1 && x > 0 && x < iPaddedWidth - 1) { double tempPhi = d_in_Phi[idImg]; d_out_cosPhi[idPadded] = cos(tempPhi); d_out_sinPhi[idPadded] = sin(tempPhi); } else { d_out_cosPhi[idPadded] = 0; d_out_sinPhi[idPadded] = 0; } } } /* PURPOSE: Generate all matrix A and b for each pixel on GPU INPUTS: d_in_imgPadded: padded image d_in_cosPhi: padded cosPhi d_in_sinPhi: padded sinPhi iImgWidth, iImgHeight: image size iPaddedWidth, iPaddedHeight: padded size OUTPUTS: d_out_A: matrix A d_out_b: vector b */ __global__ void generate_A_b_kernel(float *d_out_A, float *d_out_b, const uchar *d_in_imgPadded, const float *d_in_cosphi, const float *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + (BLOCK_SIZE_16 - 2) * blockIdx.y; const int x = threadIdx.x + (BLOCK_SIZE_16 - 2) * blockIdx.x; float sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; float sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; // Global Memory offset: every block actually begin with 2 overlapped pixels __shared__ float 
cos_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ float sin_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ uchar img_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; // Load the global mem to shared mem if (y < iPaddedHeight && x < iPaddedWidth) { cos_phi_sh[threadIdx.y][threadIdx.x] = d_in_cosphi[y*iPaddedWidth + x]; sin_phi_sh[threadIdx.y][threadIdx.x] = d_in_sinphi[y*iPaddedWidth + x]; img_sh[threadIdx.y][threadIdx.x] = d_in_imgPadded[y*iPaddedWidth + x]; } __syncthreads(); // Compute the results within the boundary if (y >= 1 && y < iPaddedHeight - 1 && x >= 1 && x < iPaddedWidth - 1 && threadIdx.x != 0 && threadIdx.x != BLOCK_SIZE_16 - 1 && threadIdx.y != 0 && threadIdx.y != BLOCK_SIZE_16 - 1) { int idA = ((y - 1)*iImgWidth + x - 1) * 9; int idb = ((y - 1)*iImgWidth + x - 1) * 3; sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; for (int i = threadIdx.y - 1; i <= threadIdx.y + 1; i++) { for (int j = threadIdx.x - 1; j <= threadIdx.x + 1; j++) { float cos_phi = cos_phi_sh[i][j]; float sin_phi = sin_phi_sh[i][j]; float ft = static_cast<float>(img_sh[i][j]); // Elements of A sum_cos += cos_phi; sum_sin += sin_phi; sum_sincos += cos_phi * sin_phi; sum_sin2 += sin_phi*sin_phi; sum_cos2 += cos_phi*cos_phi; // Elements of b sum_ft += ft; sum_ft_cos += ft * cos_phi; sum_ft_sin += ft * sin_phi; } } d_out_A[idA + 0] = 9; d_out_A[idA + 1] = 0; d_out_A[idA + 2] = 0; d_out_A[idA + 3] = sum_cos; d_out_A[idA + 4] = sum_cos2; d_out_A[idA + 5] = 0; d_out_A[idA + 6] = sum_sin; d_out_A[idA + 7] = sum_sincos; d_out_A[idA + 8] = sum_sin2; d_out_b[idb + 0] = sum_ft; d_out_b[idb + 1] = sum_ft_cos; d_out_b[idb + 2] = sum_ft_sin; } } /* PURPOSE: Generate all matrix A and b for each pixel on GPU INPUTS: d_in_imgPadded: padded image d_in_cosPhi: padded cosPhi d_in_sinPhi: padded sinPhi iImgWidth, iImgHeight: image size iPaddedWidth, iPaddedHeight: padded size OUTPUTS: d_out_A: matrix A d_out_b: vector b */ __global__ void generate_A_b_double_kernel(double *d_out_A, double *d_out_b, const uchar *d_in_imgPadded, const double *d_in_cosphi, const double *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight) { const int y = threadIdx.y + (BLOCK_SIZE_16 - 2) * blockIdx.y; const int x = threadIdx.x + (BLOCK_SIZE_16 - 2) * blockIdx.x; double sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; double sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; // Global Memory offset: every block actually begin with 2 overlapped pixels __shared__ double cos_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ double sin_phi_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; __shared__ uchar img_sh[BLOCK_SIZE_16][BLOCK_SIZE_16]; // Load the global mem to shared mem if (y < iPaddedHeight && x < iPaddedWidth) { cos_phi_sh[threadIdx.y][threadIdx.x] = d_in_cosphi[y*iPaddedWidth + x]; sin_phi_sh[threadIdx.y][threadIdx.x] = d_in_sinphi[y*iPaddedWidth + x]; img_sh[threadIdx.y][threadIdx.x] = d_in_imgPadded[y*iPaddedWidth + x]; } __syncthreads(); // Compute the results within the boundary if (y >= 1 && y < iPaddedHeight - 1 && x >= 1 && x < iPaddedWidth - 1 && threadIdx.x != 0 && threadIdx.x != BLOCK_SIZE_16 - 1 && threadIdx.y != 0 && threadIdx.y != BLOCK_SIZE_16 - 1) { int idA = ((y - 1)*iImgWidth + x - 1) * 9; int idb = ((y - 1)*iImgWidth + x - 1) * 3; sum_cos = 0, sum_sin = 0, sum_sincos = 0, sum_sin2 = 0, sum_cos2 = 0; sum_ft = 0, sum_ft_cos = 0, sum_ft_sin = 0; for (int i = threadIdx.y - 1; i <= threadIdx.y + 1; i++) { for (int j = 
threadIdx.x - 1; j <= threadIdx.x + 1; j++) { double cos_phi = cos_phi_sh[i][j]; double sin_phi = sin_phi_sh[i][j]; double ft = static_cast<double>(img_sh[i][j]); // Elements of A sum_cos += cos_phi; sum_sin += sin_phi; sum_sincos += cos_phi * sin_phi; sum_sin2 += sin_phi*sin_phi; sum_cos2 += cos_phi*cos_phi; // Elements of b sum_ft += ft; sum_ft_cos += ft * cos_phi; sum_ft_sin += ft * sin_phi; } } d_out_A[idA + 0] = 9; d_out_A[idA + 1] = 0; d_out_A[idA + 2] = 0; d_out_A[idA + 3] = sum_cos; d_out_A[idA + 4] = sum_cos2; d_out_A[idA + 5] = 0; d_out_A[idA + 6] = sum_sin; d_out_A[idA + 7] = sum_sincos; d_out_A[idA + 8] = sum_sin2; d_out_b[idb + 0] = sum_ft; d_out_b[idb + 1] = sum_ft_cos; d_out_b[idb + 2] = sum_ft_sin; } } /* PURPOSE: Get the current & deltaPhi on device */ __global__ void get_deltaPhi_currPhi_kernel(float *d_out_deltaPhi, float *d_out_currPhi, float *d_in_dphiRef, float *d_in_refPhi, cufftComplex *d_in_filtered, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { float temp = atan2f(d_in_filtered[i].y, d_in_filtered[i].x); float tempRefPhi = d_in_refPhi[i]; d_out_deltaPhi[i] = d_in_dphiRef[i] + temp; d_out_currPhi[i] = atan2f(sinf(temp + tempRefPhi), cos(temp + tempRefPhi)); } } /* PURPOSE: Get the current & deltaPhi on device */ __global__ void get_deltaPhi_currPhi_double_kernel(double *d_out_deltaPhi, double *d_out_currPhi, double *d_in_dphiRef, double *d_in_refPhi, cufftDoubleComplex *d_in_filtered, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { double temp = atan2f(d_in_filtered[i].y, d_in_filtered[i].x); double tempRefPhi = d_in_refPhi[i]; d_out_deltaPhi[i] = d_in_dphiRef[i] + temp; d_out_currPhi[i] = atan2f(sinf(temp + tempRefPhi), cos(temp + tempRefPhi)); } } __global__ void update_dphiRef_kernel(float *d_out_dphiRef, const float *d_in_dphi, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { d_out_dphiRef[i] += d_in_dphi[i]; } } __global__ void update_dphiRef_double_kernel(double *d_out_dphiRef, const double *d_in_dphi, const int iSize) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < iSize; i += blockDim.x * gridDim.x) { d_out_dphiRef[i] += d_in_dphi[i]; } } /*--------------------------------------End CUDA Kernels--------------------------------*/ void load_img_padding(uchar *d_out_img_Padded, const uchar *d_in_img, int iImgWidth, int iImgHeight, int iPaddedWidth, int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { load_img_padding_kernel<<<blocks, threads>>>(d_out_img_Padded, d_in_img, iImgWidth, iImgHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("load_img_padding_kernel launch failed!"); } void compute_cosPhi_sinPhi(float *d_out_cosPhi, float *d_out_sinPhi, float *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { compute_cosPhi_sinPhi_kernel<<<blocks, threads>>>(d_out_cosPhi, d_out_sinPhi, d_in_Phi, iWidth, iHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("compute_cosPhi_sinPhi_kernel launch failed!"); } void compute_cosPhi_sinPhi(double *d_out_cosPhi, double *d_out_sinPhi, double *d_in_Phi, const int iWidth, const int iHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { compute_cosPhi_sinPhi_double_kernel<<<blocks, threads>>>(d_out_cosPhi, d_out_sinPhi, d_in_Phi, iWidth, iHeight, iPaddedWidth, iPaddedHeight); 
getLastCudaError("compute_cosPhi_sinPhi_double_kernel launch failed!"); } void get_A_b(float *d_out_A, float *d_out_b, const uchar *d_in_imgPadded, const float *d_in_cosphi, const float *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { generate_A_b_kernel<<<blocks, threads>>>(d_out_A, d_out_b, d_in_imgPadded, d_in_cosphi, d_in_sinphi, iImgWidth, iImgHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("generate_A_b_kernel launch failed!"); } void get_A_b(double *d_out_A, double *d_out_b, const uchar *d_in_imgPadded, const double *d_in_cosphi, const double *d_in_sinphi, const int iImgWidth, const int iImgHeight, const int iPaddedWidth, const int iPaddedHeight, const dim3 &blocks, const dim3 &threads) { generate_A_b_double_kernel<<<blocks, threads>>>(d_out_A, d_out_b, d_in_imgPadded, d_in_cosphi, d_in_sinphi, iImgWidth, iImgHeight, iPaddedWidth, iPaddedHeight); getLastCudaError("generate_A_b_double_kernel launch failed!"); } void get_deltaPhi_currPhi(float *d_out_deltaPhi, float *d_out_currPhi, float *d_in_dphiRef, float *d_in_refPhi, cufftComplex *d_in_filtered, const int iSize) { get_deltaPhi_currPhi_kernel<<<8*32, 256>>>(d_out_deltaPhi, d_out_currPhi, d_in_dphiRef, d_in_refPhi, d_in_filtered, iSize); getLastCudaError("get_deltaPhi_currPhi_kernel launch failed!"); } void get_deltaPhi_currPhi(double *d_out_deltaPhi, double *d_out_currPhi, double *d_in_dphiRef, double *d_in_refPhi, cufftDoubleComplex *d_in_filtered, const int iSize) { get_deltaPhi_currPhi_double_kernel<<<8*32, 256>>>(d_out_deltaPhi, d_out_currPhi, d_in_dphiRef, d_in_refPhi, d_in_filtered, iSize); getLastCudaError("get_deltaPhi_currPhi_double_kernel launch failed!"); } void update_dphiRef(float *d_out_dphiRef, const float *d_in_dphi, const int iSize) { update_dphiRef_kernel<<<8*32, 256>>>(d_out_dphiRef, d_in_dphi, iSize); getLastCudaError("update_dphiRef_kernel launch failed!"); } void update_dphiRef(double *d_out_dphiRef, const double *d_in_dphi, const int iSize) { update_dphiRef_double_kernel<<<8*32, 256>>>(d_out_dphiRef, d_in_dphi, iSize); getLastCudaError("update_dphiRef_double_kernel launch failed!"); } } // namespace DPRA
cda5bc9c6344cde564968c62f6f84e67095f2112.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"

__global__ void kernel(uchar4 *ptr, GLdouble min, GLdouble max)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    if (x < DIMX && y < DIMY)
    {
        int offset = x + y * blockDim.x * gridDim.x;

        GLdouble a, b, color;
        a = min + ((GLdouble)x / DIMX * (max - min));
        b = (min*(GLdouble)DIMY/ (GLdouble)DIMX) + ((GLdouble)y / DIMY * ((max*(GLdouble)DIMY / (GLdouble)DIMX) - (min*(GLdouble)DIMY / (GLdouble)DIMX)));

        //GLdouble ca = a;
        //GLdouble cb = b;
        //GLdouble ca = 0.36024;
        //GLdouble cb = -0.64131;
        GLdouble ca = 0.0;
        GLdouble cb = -0.8;

        /* Check for divergence */
        int iter = 0;
        for (iter = 0; iter < 100; iter++)
        {
            GLdouble real = (a*a) - (b*b);
            GLdouble imag = 2 * a * b;
            a = real + ca;
            b = imag + cb;
            if ((a*a) + (b*b) > 256.0f)
            {
                break;
            }
        }

        /* Draw the pixel */
        if (iter == 100) iter = 0;
        ptr[offset].x = 0;
        ptr[offset].y = (GLdouble)iter / 100.0 * (255.0);
        ptr[offset].z = 0;
        ptr[offset].w = 255;
    }
}

void gpuMandelbrotSet(dim3 grids, dim3 threads, uchar4 *devPtr, GLdouble min, GLdouble max)
{
    hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, devPtr, min, max);
}

void cpuMandelbrotSet(uchar4* ptr, GLdouble min, GLdouble max)
{
    for (int x = 0; x < DIMX; x++)
    {
        for (int y = 0; y < DIMY; y++)
        {
            int offset = x + y * DIMX;

            GLdouble a, b, color;
            a = min + ((GLdouble)x / DIMX * (max - min));
            b = (min*(GLdouble)DIMY / (GLdouble)DIMX) + ((GLdouble)y / DIMY * ((max*(GLdouble)DIMY / (GLdouble)DIMX) - (min*(GLdouble)DIMY / (GLdouble)DIMX)));

            //GLdouble ca = a;
            //GLdouble cb = b;
            GLdouble ca = 0.36024;
            GLdouble cb = -0.64131;
            //GLdouble ca = 0.0;
            //GLdouble cb = -0.8;

            /* Check for divergence */
            int iter = 0;
            for (iter = 0; iter < 100; iter++)
            {
                GLdouble real = (a*a) - (b*b);
                GLdouble imag = 2 * a * b;
                a = real + ca;
                b = imag + cb;
                if ((a*a) + (b*b) > 256.0f)
                {
                    break;
                }
            }

            /* Draw the pixel */
            if (iter == 100) iter = 0;
            ptr[offset].x = 0;
            ptr[offset].y = (GLdouble)iter / 100.0 * (255.0);
            ptr[offset].z = 0;
            ptr[offset].w = 255;
        }
    }
}
cda5bc9c6344cde564968c62f6f84e67095f2112.cu
#include "device_launch_parameters.h" #include "kernel.h" __global__ void kernel(uchar4 *ptr, GLdouble min, GLdouble max) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < DIMX && y < DIMY) { int offset = x + y * blockDim.x * gridDim.x; GLdouble a, b, color; a = min + ((GLdouble)x / DIMX * (max - min)); b = (min*(GLdouble)DIMY/ (GLdouble)DIMX) + ((GLdouble)y / DIMY * ((max*(GLdouble)DIMY / (GLdouble)DIMX) - (min*(GLdouble)DIMY / (GLdouble)DIMX))); //GLdouble ca = a; //GLdouble cb = b; //GLdouble ca = 0.36024; //GLdouble cb = -0.64131; GLdouble ca = 0.0; GLdouble cb = -0.8; /* Check for divergence */ int iter = 0; for (iter = 0; iter < 100; iter++) { GLdouble real = (a*a) - (b*b); GLdouble imag = 2 * a * b; a = real + ca; b = imag + cb; if ((a*a) + (b*b) > 256.0f) { break; } } /* Draw the pixel */ if (iter == 100) iter = 0; ptr[offset].x = 0; ptr[offset].y = (GLdouble)iter / 100.0 * (255.0); ptr[offset].z = 0; ptr[offset].w = 255; } } void gpuMandelbrotSet(dim3 grids, dim3 threads, uchar4 *devPtr, GLdouble min, GLdouble max) { kernel<<<grids, threads>>>(devPtr, min, max); } void cpuMandelbrotSet(uchar4* ptr, GLdouble min, GLdouble max) { for (int x = 0; x < DIMX; x++) { for (int y = 0; y < DIMY; y++) { int offset = x + y * DIMX; GLdouble a, b, color; a = min + ((GLdouble)x / DIMX * (max - min)); b = (min*(GLdouble)DIMY / (GLdouble)DIMX) + ((GLdouble)y / DIMY * ((max*(GLdouble)DIMY / (GLdouble)DIMX) - (min*(GLdouble)DIMY / (GLdouble)DIMX))); //GLdouble ca = a; //GLdouble cb = b; GLdouble ca = 0.36024; GLdouble cb = -0.64131; //GLdouble ca = 0.0; //GLdouble cb = -0.8; /* Check for divergence */ int iter = 0; for (iter = 0; iter < 100; iter++) { GLdouble real = (a*a) - (b*b); GLdouble imag = 2 * a * b; a = real + ca; b = imag + cb; if ((a*a) + (b*b) > 256.0f) { break; } } /* Draw the pixel */ if (iter == 100) iter = 0; ptr[offset].x = 0; ptr[offset].y = (GLdouble)iter / 100.0 * (255.0); ptr[offset].z = 0; ptr[offset].w = 255; } } }
7ebc9626976ef258f9be5e295d916efb796d01ec.hip
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include "modules/perception/inference/utils/cuda_util.h"

#include <boost/thread.hpp>
#include <hip/hip_runtime_api.h>

#include "cyber/common/log.h"

namespace apollo {
namespace perception {
namespace inference {

static boost::thread_specific_ptr<CudaUtil> thread_instance_;

#define CUBLAS_CHECK(condition)                                  \
  do {                                                           \
    hipblasStatus_t status = condition;                          \
    CHECK_EQ(status, HIPBLAS_STATUS_SUCCESS) << " " << status;   \
  } while (0)

CudaUtil &CudaUtil::get() {
  if (!thread_instance_.get()) {
    thread_instance_.reset(new CudaUtil);
  }
  return *(thread_instance_.get());
}

CudaUtil::CudaUtil() { CUBLAS_CHECK(hipblasCreate(&cublas_handle_)); }

bool CudaUtil::set_device_id(int device_id) {
  int now_device = -1;
  auto cuda_error = hipGetDevice(&now_device);
  CHECK_EQ(cuda_error, hipSuccess) << " " << hipGetErrorString(cuda_error);
  if (now_device == device_id) {
    return true;
  } else {
    cuda_error = hipSetDevice(device_id);
    CHECK_EQ(cuda_error, hipSuccess) << " " << hipGetErrorString(cuda_error);
    if (get().cublas_handle_ != nullptr) {
      CUBLAS_CHECK(hipblasDestroy(get().cublas_handle_));
    }
    CUBLAS_CHECK(hipblasCreate(&get().cublas_handle_));
  }
  return true;
}

hipblasHandle_t &CudaUtil::get_handler() { return get().cublas_handle_; }

CudaUtil::~CudaUtil() {
  if (get().cublas_handle_) {
    CUBLAS_CHECK(hipblasDestroy(get().cublas_handle_));
  }
}

}  // namespace inference
}  // namespace perception
}  // namespace apollo
7ebc9626976ef258f9be5e295d916efb796d01ec.cu
/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include "modules/perception/inference/utils/cuda_util.h"

#include <boost/thread.hpp>
#include <cuda_runtime_api.h>

#include "cyber/common/log.h"

namespace apollo {
namespace perception {
namespace inference {

static boost::thread_specific_ptr<CudaUtil> thread_instance_;

#define CUBLAS_CHECK(condition)                                  \
  do {                                                           \
    cublasStatus_t status = condition;                           \
    CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " << status;    \
  } while (0)

CudaUtil &CudaUtil::get() {
  if (!thread_instance_.get()) {
    thread_instance_.reset(new CudaUtil);
  }
  return *(thread_instance_.get());
}

CudaUtil::CudaUtil() { CUBLAS_CHECK(cublasCreate(&cublas_handle_)); }

bool CudaUtil::set_device_id(int device_id) {
  int now_device = -1;
  auto cuda_error = cudaGetDevice(&now_device);
  CHECK_EQ(cuda_error, cudaSuccess) << " " << cudaGetErrorString(cuda_error);
  if (now_device == device_id) {
    return true;
  } else {
    cuda_error = cudaSetDevice(device_id);
    CHECK_EQ(cuda_error, cudaSuccess) << " " << cudaGetErrorString(cuda_error);
    if (get().cublas_handle_ != nullptr) {
      CUBLAS_CHECK(cublasDestroy(get().cublas_handle_));
    }
    CUBLAS_CHECK(cublasCreate(&get().cublas_handle_));
  }
  return true;
}

cublasHandle_t &CudaUtil::get_handler() { return get().cublas_handle_; }

CudaUtil::~CudaUtil() {
  if (get().cublas_handle_) {
    CUBLAS_CHECK(cublasDestroy(get().cublas_handle_));
  }
}

}  // namespace inference
}  // namespace perception
}  // namespace apollo
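A possible caller for CudaUtil is sketched below. It is not from the Apollo tree; it assumes set_device_id and get_handler are static members (their definitions above suggest this, since both go through the static get()), that cuda_util.h pulls in the cuBLAS types, and that ScaleOnDevice and its arguments are hypothetical names.

#include <cublas_v2.h>

#include "modules/perception/inference/utils/cuda_util.h"

// Scales a device vector in place using the thread-local handle owned by CudaUtil.
void ScaleOnDevice(float *d_x, int n, float alpha) {
  using apollo::perception::inference::CudaUtil;
  CudaUtil::set_device_id(0);                        // bind this thread to GPU 0
  cublasHandle_t &handle = CudaUtil::get_handler();  // shared handle, do not destroy
  cublasSscal(handle, n, &alpha, d_x, 1);            // x <- alpha * x
}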
d1beacf68d511d284eebd30ac32598c2badb37b5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void matmulKernel(float *A, float *B, float *C, int rA, int cA, int cB){
    int i = blockIdx.y*gridDim.x + blockIdx.x,
        j = threadIdx.y*blockDim.x + threadIdx.x;
    if(i < rA && j < cB){
        C[i*cB + j] = 0.;
        for(int k=0;k<cA;++k)
            C[i*cB + j] += A[i*cA + k] * B[k*cB + j];
    }
    return;
}
d1beacf68d511d284eebd30ac32598c2badb37b5.cu
#include "includes.h" __global__ void matmulKernel(float *A, float *B, float *C, int rA, int cA, int cB){ int i = blockIdx.y*gridDim.x + blockIdx.x, j = threadIdx.y*blockDim.x + threadIdx.x; if(i < rA && j < cB){ C[i*cB + j] = 0.; for(int k=0;k<cA;++k) C[i*cB + j] += A[i*cA + k] * B[k*cB + j]; } return; }
8d61fab7b34f44604f3b50ad0b628fc5d6745f75.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__device__ static void TC(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number)
{
    // set the neuron computation parameters
    float C=200;
    float k=1.6;
    float vr=-60;
    float vt=-50;
    float G_up=2.0;
    float G_down=2.0;
    float a=0.1;
    float b=15;
    float c=-60;
    float d=10;
    float v_peak=40;
    float I;

    float v=neuro[number].v;
    float u=neuro[number].u;
    I=Ix[number].I;

    //Izhikevich model
    if(v>-65){b=0;}else{b=15;}
    v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C;
    u=u+tau*a*(b*(v-vr)-u);

    spike[number]=0;
    if(v>v_peak)
    {
        v=c;
        u=u+d;
        spike[number]=1;
    }
    // if(number==840000)
    // {printf("I0=%f,I1=%f,I2=%f,I_distal=%f,I_proximal=%f,I=%f,v_distal=%f,v_proximal=%f,v=%f\n",I0,I1,I2,I_distal,I_proximal,I,v_distal,v_proximal,v);}

    neuro[number].v=v;
    neuro[number].u=u;
    Ix[number].I=0;
}

__global__ static void TC_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1])*10+(bid * THREAD_NUM[2] + tid)*10;

    /******** virtual compute kernel for neuron 1 ********/
    if((number+0)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+0);}
    /******** virtual compute kernel for neuron 2 ********/
    if((number+1)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+1);}
    /******** virtual compute kernel for neuron 3 ********/
    if((number+2)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+2);}
    /******** virtual compute kernel for neuron 4 ********/
    if((number+3)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+3);}
    /******** virtual compute kernel for neuron 5 ********/
    if((number+4)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+4);}
    /******** virtual compute kernel for neuron 6 ********/
    if((number+5)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+5);}
    /******** virtual compute kernel for neuron 7 ********/
    if((number+6)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+6);}
    /******** virtual compute kernel for neuron 8 ********/
    if((number+7)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+7);}
    /******** virtual compute kernel for neuron 9 ********/
    if((number+8)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+8);}
    /******** virtual compute kernel for neuron 10 ********/
    if((number+9)<=boxnum[2])
    {TC(input,neuro,spike,Ix,number+9);}
}
8d61fab7b34f44604f3b50ad0b628fc5d6745f75.cu
#include "cuda_runtime.h" #include <stdio.h> __device__ static void TC(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number) { //设置神经元计算参数 float C=200; float k=1.6; float vr=-60; float vt=-50; float G_up=2.0; float G_down=2.0; float a=0.1; float b=15; float c=-60; float d=10; float v_peak=40; float I; float v=neuro[number].v; float u=neuro[number].u; I=Ix[number].I; //Izhikevich model if(v>-65){b=0;}else{b=15;} v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C; u=u+tau*a*(b*(v-vr)-u); spike[number]=0; if(v>v_peak) { v=c; u=u+d; spike[number]=1; } // if(number==840000) // {printf("I0=%f,I1=%f,I2=%f,I_distal=%f,I_proximal=%f,I=%f,v_distal=%f,v_proximal=%f,v=%f\n",I0,I1,I2,I_distal,I_proximal,I,v_distal,v_proximal,v);} neuro[number].v=v; neuro[number].u=u; Ix[number].I=0; } __global__ static void TC_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1])*10+(bid * THREAD_NUM[2] + tid)*10; /********第一个神经元虚拟计算内核*********/ if((number+0)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+0);} /********第二个神经元虚拟计算内核********/ if((number+1)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+1);} /********第三个神经元虚拟计算内核********/ if((number+2)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+2);} /********第四个神经元虚拟计算内核*********/ if((number+3)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+3);} /********第五个神经元虚拟计算内核*********/ if((number+4)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+4);} /********第六个神经元虚拟计算内核*********/ if((number+5)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+5);} /********第七个神经元虚拟计算内核********/ if((number+6)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+6);} /********第八个神经元虚拟计算内核*********/ if((number+7)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+7);} /********第九个神经元虚拟计算内核*********/ if((number+8)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+8);} /********第十个神经元虚拟计算内核*********/ if((number+9)<=boxnum[2]) {TC(input,neuro,spike,Ix,number+9);} }
ff1e7d87f4ad20127517f8f5c88a2ac9ab3a97c3.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> using namespace std; #include <sys/time.h> #include <hip/hip_runtime.h> #include <unistd.h> #include <stdlib.h> union FP32 { unsigned int i; float f; }; __global__ void test(float* d, float* a, float* b){ float d_array[4]; for (int i = 0; i < 4; ++i) { d_array[i] = d[i]; } asm volatile( ".reg .b32 ra<4>, rd<4>;\n\t" "wgmma.fence.sync.aligned;\n\t" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e5m2 {%0, %1, %2, %3}, %4, %5, 0, -1, -1;\n\t" "wgmma.commit_group.sync.aligned;\n\t" "wgmma.wait_group.sync.aligned 0;\n\t" : "+f"(d_array[0]), "+f"(d_array[1]), "+f"(d_array[2]), "+f"(d_array[3]) : "l"(a),"l"(b) ); for (int i = 0; i < 4; ++i) { d[i] = d_array[i]; } } void InitOne(float * a, const int n) { FP32 fp32; fp32.i = 0x38383838; for ( int i = 0; i < n; i++ ) { a[i] = fp32.f; } } void InitZero(float * a, const int n) { for ( int i = 0; i < n; i++ ) { a[i] = 0.0; } } void show(float * a, const int n) { for ( int i=0; i<n; i++){ std::cout << a[i] << std::endl; } std::cout << std::endl; } int main(int argc, char** argv){ int size = 512; float* host_a=(float*)malloc(sizeof(float) * size); float* host_b=(float*)malloc(sizeof(float) * size); float* host_d=(float*)malloc(sizeof(float) * size); float* device_a=NULL; float* device_b=NULL; float* device_d=NULL; hipMalloc((void**)(&device_a), sizeof(float) * size); hipMalloc((void**)(&device_b), sizeof(float) * size); hipMalloc((void**)(&device_d), sizeof(float) * size); for(int i=0;i<size;i++){ host_a[i] =0.0; host_d[i] = 0.0; } InitOne(host_b, size); FP32 fp32; fp32.i = 0x70400000; host_a[0]=fp32.f; hipMemcpy((void*)device_a, (void*)host_a, sizeof(float)* size, hipMemcpyHostToDevice); hipMemcpy((void*)device_b, (void*)host_b, sizeof(float)* size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( test), dim3(4),dim3(128), 0, 0, device_d, device_a, device_b); hipDeviceSynchronize(); hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); fp32.f=host_d[0]; std::cout<< hex << fp32.i << std::endl; //show(host_d, size); }
ff1e7d87f4ad20127517f8f5c88a2ac9ab3a97c3.cu
#include <iostream> #include <cstdlib> using namespace std; #include <sys/time.h> #include <cuda.h> #include <unistd.h> #include <stdlib.h> union FP32 { unsigned int i; float f; }; __global__ void test(float* d, float* a, float* b){ float d_array[4]; for (int i = 0; i < 4; ++i) { d_array[i] = d[i]; } asm volatile( ".reg .b32 ra<4>, rd<4>;\n\t" "wgmma.fence.sync.aligned;\n\t" "wgmma.mma_async.sync.aligned.m64n8k32.f32.e4m3.e5m2 {%0, %1, %2, %3}, %4, %5, 0, -1, -1;\n\t" "wgmma.commit_group.sync.aligned;\n\t" "wgmma.wait_group.sync.aligned 0;\n\t" : "+f"(d_array[0]), "+f"(d_array[1]), "+f"(d_array[2]), "+f"(d_array[3]) : "l"(a),"l"(b) ); for (int i = 0; i < 4; ++i) { d[i] = d_array[i]; } } void InitOne(float * a, const int n) { FP32 fp32; fp32.i = 0x38383838; for ( int i = 0; i < n; i++ ) { a[i] = fp32.f; } } void InitZero(float * a, const int n) { for ( int i = 0; i < n; i++ ) { a[i] = 0.0; } } void show(float * a, const int n) { for ( int i=0; i<n; i++){ std::cout << a[i] << std::endl; } std::cout << std::endl; } int main(int argc, char** argv){ int size = 512; float* host_a=(float*)malloc(sizeof(float) * size); float* host_b=(float*)malloc(sizeof(float) * size); float* host_d=(float*)malloc(sizeof(float) * size); float* device_a=NULL; float* device_b=NULL; float* device_d=NULL; cudaMalloc((void**)(&device_a), sizeof(float) * size); cudaMalloc((void**)(&device_b), sizeof(float) * size); cudaMalloc((void**)(&device_d), sizeof(float) * size); for(int i=0;i<size;i++){ host_a[i] =0.0; host_d[i] = 0.0; } InitOne(host_b, size); FP32 fp32; fp32.i = 0x70400000; host_a[0]=fp32.f; cudaMemcpy((void*)device_a, (void*)host_a, sizeof(float)* size, cudaMemcpyHostToDevice); cudaMemcpy((void*)device_b, (void*)host_b, sizeof(float)* size, cudaMemcpyHostToDevice); test<<<4,128>>>(device_d, device_a, device_b); cudaDeviceSynchronize(); cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); fp32.f=host_d[0]; std::cout<< hex << fp32.i << std::endl; //show(host_d, size); }
9092f0a0f1df64314209e07c061a5050b1accecd.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
9092f0a0f1df64314209e07c061a5050b1accecd.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
389e5ff92d93eb1db1bd5e1b3a39834d766c454e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <test_utils.h> #include <metrics/batched/information_criterion.cuh> #include <raft/cudart_utils.h> #include <raft/mr/device/allocator.hpp> #include <gtest/gtest.h> #include <cmath> #include <random> #include <vector> namespace MLCommon { namespace Metrics { namespace Batched { template <typename T> void naive_ic( T* h_ic, const T* h_loglike, IC_Type ic_type, int n_params, int batch_size, int n_samples) { T ic_base{}; T N = static_cast<T>(n_params); T M = static_cast<T>(n_samples); switch (ic_type) { case AIC: ic_base = (T)2 * N; break; case AICc: ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1)); break; case BIC: ic_base = ::log(M) * N; break; } #pragma omp parallel for for (int bid = 0; bid < batch_size; bid++) { h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid]; } } template <typename T> struct BatchedICInputs { int batch_size; int n_params; int n_samples; IC_Type ic_type; T tolerance; }; template <typename T> class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<BatchedICInputs<T>>::GetParam(); // Create stream and allocator CUDA_CHECK(hipStreamCreate(&stream)); allocator = std::make_shared<raft::mr::device::default_allocator>(); // Create arrays std::vector<T> loglike_h = std::vector<T>(params.batch_size); res_h.resize(params.batch_size); T* loglike_d = (T*)allocator->allocate(sizeof(T) * params.batch_size, stream); res_d = (T*)allocator->allocate(sizeof(T) * params.batch_size, stream); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log for (int i = 0; i < params.batch_size; i++) loglike_h[i] = ::log(udis(gen)); // Copy the data to the device raft::update_device(loglike_d, loglike_h.data(), params.batch_size, stream); // Compute the tested results information_criterion(res_d, loglike_d, params.ic_type, params.n_params, params.batch_size, params.n_samples, stream); // Compute the expected results naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params, params.batch_size, params.n_samples); allocator->deallocate(loglike_d, sizeof(T) * params.batch_size, stream); } void TearDown() override { allocator->deallocate(res_d, sizeof(T) * params.batch_size, stream); CUDA_CHECK(hipStreamDestroy(stream)); } protected: std::shared_ptr<raft::mr::device::default_allocator> allocator; BatchedICInputs<T> params; T* res_d; std::vector<T> res_h; hipStream_t stream = 0; }; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<double>> inputsd = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<float>> inputsf = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; 
using BatchedICTestD = BatchedICTest<double>; using BatchedICTestF = BatchedICTest<float>; TEST_P(BatchedICTestD, Result) { ASSERT_TRUE(devArrMatchHost( res_h.data(), res_d, params.batch_size, raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedICTestF, Result) { ASSERT_TRUE(devArrMatchHost( res_h.data(), res_d, params.batch_size, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Metrics } // namespace MLCommon
389e5ff92d93eb1db1bd5e1b3a39834d766c454e.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <test_utils.h> #include <metrics/batched/information_criterion.cuh> #include <raft/cudart_utils.h> #include <raft/mr/device/allocator.hpp> #include <gtest/gtest.h> #include <cmath> #include <random> #include <vector> namespace MLCommon { namespace Metrics { namespace Batched { template <typename T> void naive_ic( T* h_ic, const T* h_loglike, IC_Type ic_type, int n_params, int batch_size, int n_samples) { T ic_base{}; T N = static_cast<T>(n_params); T M = static_cast<T>(n_samples); switch (ic_type) { case AIC: ic_base = (T)2 * N; break; case AICc: ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1)); break; case BIC: ic_base = std::log(M) * N; break; } #pragma omp parallel for for (int bid = 0; bid < batch_size; bid++) { h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid]; } } template <typename T> struct BatchedICInputs { int batch_size; int n_params; int n_samples; IC_Type ic_type; T tolerance; }; template <typename T> class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<BatchedICInputs<T>>::GetParam(); // Create stream and allocator CUDA_CHECK(cudaStreamCreate(&stream)); allocator = std::make_shared<raft::mr::device::default_allocator>(); // Create arrays std::vector<T> loglike_h = std::vector<T>(params.batch_size); res_h.resize(params.batch_size); T* loglike_d = (T*)allocator->allocate(sizeof(T) * params.batch_size, stream); res_d = (T*)allocator->allocate(sizeof(T) * params.batch_size, stream); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log for (int i = 0; i < params.batch_size; i++) loglike_h[i] = std::log(udis(gen)); // Copy the data to the device raft::update_device(loglike_d, loglike_h.data(), params.batch_size, stream); // Compute the tested results information_criterion(res_d, loglike_d, params.ic_type, params.n_params, params.batch_size, params.n_samples, stream); // Compute the expected results naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params, params.batch_size, params.n_samples); allocator->deallocate(loglike_d, sizeof(T) * params.batch_size, stream); } void TearDown() override { allocator->deallocate(res_d, sizeof(T) * params.batch_size, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: std::shared_ptr<raft::mr::device::default_allocator> allocator; BatchedICInputs<T> params; T* res_d; std::vector<T> res_h; cudaStream_t stream = 0; }; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<double>> inputsd = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; // Test parameters (op, n_batches, m, n, p, q, tolerance) const std::vector<BatchedICInputs<float>> inputsf = { {1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}}; using BatchedICTestD = BatchedICTest<double>; 
using BatchedICTestF = BatchedICTest<float>; TEST_P(BatchedICTestD, Result) { ASSERT_TRUE(devArrMatchHost( res_h.data(), res_d, params.batch_size, raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedICTestF, Result) { ASSERT_TRUE(devArrMatchHost( res_h.data(), res_d, params.batch_size, raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Metrics } // namespace MLCommon
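For reference, the host-side check naive_ic in the pair above reproduces the standard information criteria, with \(k\) = n_params, \(n\) = n_samples and \(\ln\hat{L}\) the per-batch log-likelihood:

\[
\mathrm{AIC} = 2k - 2\ln\hat{L},\qquad
\mathrm{AICc} = 2\Bigl(k + \frac{k(k+1)}{n-k-1}\Bigr) - 2\ln\hat{L},\qquad
\mathrm{BIC} = k\ln n - 2\ln\hat{L}.
\]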
9d975a6e75fe92298acbabbbcb41f0708bc8dd1b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/ATen.h" #include "ATen/hip/HIPApplyUtils.cuh" #include "ATen/hip/HIPContext.h" #include "ATen/NativeFunctions.h" #include "ATen/TensorUtils.h" #include "ATen/Utils.h" #include "c10/util/Exception.h" #include <THH/THHGeneral.h> #include "THH/THHNumerics.cuh" #include <ATen/native/hip/LaunchUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> #define START_IND(a,b,c) (int)::floor((float)(a * c) / b) #define END_IND(a,b,c) (int)::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a,b,c) ((a * c) / b) #define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b) // #define START_IND(a,b,c) a * c / b // #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0 #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched namespace at { namespace native { namespace { // 4d tensor B x D x H x W // All kernels view batch dim B and feature dim D as collapsed. /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output */ template <typename T> __global__ void adaptive_average_pool(T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; T sum = ScalarConvert<int, T>::to(0); int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { T val = ptr_input[iw*istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } /* * Description: * this function computes the gradInput from gradOutput */ template <typename T> __global__ void adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on input pixels int ih, iw; // select input/output plane based on thread/block ID int i_plane = blockIdx.x; int o_plane = i_plane; gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; int istartH = blockDim.y*blockIdx.y + threadIdx.y; int iendH = isizeH; int istepH = blockDim.y*gridDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // compute gradInput for(ih = istartH; ih < iendH; ih += istepH) { int ostartH = START_IND(ih, isizeH, osizeH); int oendH = END_IND(ih, isizeH, osizeH); for(iw = istartW; iw < iendW; iw += istepW) { int ostartW = START_IND(iw, isizeW, osizeW); int oendW = END_IND(iw, isizeW, osizeW); // Compute the gradients over corresponding output pixels T *ptr_gradInput = gradInput + ih*isizeW + iw; int oh, ow; for(oh = ostartH; oh < oendH; ++oh) { int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH); for(ow = ostartW; ow < oendW; ++ow) { int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW); T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW; *ptr_gradInput += grad_delta; } } } } } /* * Description: * this function computes the gradInput from gradOutput * (uses atomic add) */ template <typename T> __global__ void atomic_adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on output indices int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; gradOutput = gradOutput + o_plane*osizeW*osizeH; gradInput = gradInput + i_plane*isizeW*isizeH; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients for over corresponding input pixels T *ptr_gradInput = gradInput + istartH*isizeW + istartW; T *ptr_gradOutput = gradOutput + oh*osizeW + ow; T grad_delta = *ptr_gradOutput / kW / kH; int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { // atomic add since different threads could update same variable atomicAdd(&(ptr_gradInput[iw]), grad_delta); } ptr_gradInput += isizeW; // next input line } } } } /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t istrideB, index_t istrideC, index_t istrideH, index_t istrideW) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; // each CTA handles a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. output = output + batch_id * osizeH * osizeW * sizeC; input = input + batch_id * istrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x]; // iterate on output H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t oH = (osizeH + gridDim.z-1) / gridDim.z; index_t oW = (osizeW + gridDim.y-1) / gridDim.y; index_t ostartH = threadIdx.z + blockIdx.z*oH; index_t oendH = ::min(ostartH+oH, osizeH); index_t ostartW = threadIdx.y + blockIdx.y*oW; index_t oendW = ::min(ostartW+oW, osizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. 
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int istartH = START_IND_INT(oh, osizeH, isizeH); int iendH = END_IND_INT(oh, osizeH, isizeH); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int istartW = START_IND_INT(ow, osizeW, isizeW); int iendW = END_IND_INT(ow, osizeW, isizeW); scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW)); // loop on input: hierarchy h->w->c, use shared memory here hopefully // would not stall global memory read; for (index_t ih = istartH; ih < iendH; ih++) { for (index_t iw = istartW; iw < iendW; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_input[c*istrideC]; cached_index += blockDim.x; } } } scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC; int cached_index = threadIdx.x; // write accumulated output to global memory; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { // This causes numerical issueptr when unit test with NCHW kernel; // switch to could verify the correctness; // output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW); ptr_output[c] = out_cached[cached_index] * factor; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. } } } /* * Description: * this function computes the gradInput from gradOutput * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t ostrideB, index_t ostrideC, index_t ostrideH, index_t ostrideW) { extern __shared__ int smem[]; index_t *ostartW_cached = smem; index_t *oendW_cached = &ostartW_cached[isizeW]; // be careful with alignment, in case scalar_t is fp16, we want to assign // int pointers first. scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]); scalar_t *r_kH_cached = &r_kW_cached[osizeW]; scalar_t *out_cached = &r_kH_cached[osizeH]; // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // Precompute output start/end index per input index on width dimension; // Not doing this for height dimension, as that's our out-most loop. for (index_t i = thread_id; i < isizeW; i+= block_size) { ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW); oendW_cached[i] = END_IND_INT(i, isizeW, osizeW); } // Precompute pooling height/weight factor for each output element; // This is used to weight output gradient when accumulate them on input // gradient. // Technically we don't have to compute it for the whole `osizeH`, since // each cta only covers a consecutive portion of the entire output. But it's // not going to save us from code divergence, and shared memory save is not // an issue neither, so just leave it as is for now. 
for (index_t i = thread_id; i < osizeH; i+= block_size) { r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH)); } for (index_t i = thread_id; i < osizeW; i+= block_size) { r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW)); } // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. gradInput = gradInput + batch_id * isizeH * isizeW * sizeC; gradOutput = gradOutput + batch_id * ostrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C]; // iterate on input H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t iH = (isizeH + gridDim.z-1) / gridDim.z; index_t iW = (isizeW + gridDim.y-1) / gridDim.y; index_t istartH = threadIdx.z + blockIdx.z*iH; index_t iendH = ::min(istartH+iH, isizeH); index_t istartW = threadIdx.y + blockIdx.y*iW; index_t iendW = ::min(istartW+iW, isizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) { index_t ostartH = START_IND_INT(ih, isizeH, osizeH); index_t oendH = END_IND_INT(ih, isizeH, osizeH); for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) { // loop on output: hierarchy h->w->c, so we could reuse weight factor f // because it remains the same for given oh & ow for(index_t oh = ostartH; oh < oendH; ++oh) { for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) { scalar_t f = r_kW_cached[ow] * r_kH_cached[oh]; const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW; int cached_index = threadIdx.x; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f; cached_index += blockDim.x; } } } scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC; int cached_index = threadIdx.x; // write accumulated gradIput to global memory; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { ptr_gradInput[c] = out_cached[cached_index]; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. 
} } } // 4d tensor B x D x H x W void adaptive_avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size) { TensorArg input_arg{ input, "input", 1 }, output_arg{ output, "output", 2 }; checkAllSameGPU("cudnn_adaptive_avg_pooling2d", {input_arg, output_arg}); for (int64_t i = 0; i < input.ndimension(); i++) { TORCH_CHECK(input.size(i) > 0, "adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, " "but input has sizes ", input.sizes(), " with dimension ", i, " being " "empty"); } Tensor input_ = input; switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); int sizeB = input_.size(0); int sizeC = input_.size(1); int isizeH = input_.size(2); int isizeW = input_.size(3); int64_t istrideB = input_.stride(0); int64_t istrideC = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); int osizeH = output_size[0]; int osizeW = output_size[1]; // preserve channels_last stride on output tensor; if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) { // TODO: modify this after resize_ added `memory_format` tag output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC}); } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; // Launch kernel on output tensor elements. Logic behind launch config: // output tensor size NCHW, strides NHWC; // Launch on: // N -> grid.x // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4); int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C); int grid_x = sizeB*kernel_stride_C; int grid_y = cuda::ATenCeilDiv(osizeW, block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(osizeH, block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] { hipLaunchKernelGGL(( adaptive_average_pool_nhwc<int32_t>), dim3(grid), dim3(block), kernel_size_C * block_x * block_y * block_z * sizeof(scalar_t), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, istrideB, istrideC, istrideH, istrideW); } ); break; } case at::MemoryFormat::Contiguous: { TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); int64_t grid_x = input.size(-3); if (input.ndimension() == 4) { input_ = input.contiguous(); grid_x *= input_.size(-4); } int64_t sizeD = input_.size(-3); int64_t isizeH = input_.size(-2); int64_t isizeW = input_.size(-1); int64_t istrideD = input_.stride(-3); int64_t istrideH = input_.stride(-2); int64_t istrideW = input_.stride(-1); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 4) { output.resize_({input_.size(-4), sizeD, osizeH, osizeW}); } else { output.resize_({sizeD, osizeH, osizeW}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] { scalar_t *input_data = input_.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = std::max<int64_t>((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); // run averagepool kernel hipLaunchKernelGGL(( adaptive_average_pool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } THCudaCheck(hipGetLastError()); } void adaptive_avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }, grad_output_arg{ gradOutput_, "gradOutput_", 2 }, input_arg{ input, "input", 3 }; checkAllSameGPU("cudnn_adaptive_avg_pooling2d_out", {grad_input_arg, grad_output_arg, input_arg}); switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); int sizeB = input.size(0); int sizeC = input.size(1); int isizeH = input.size(2); int isizeW = input.size(3); Tensor gradOutput = gradOutput_; int64_t ostrideB = gradOutput.stride(0); int64_t ostrideC = gradOutput.stride(1); int64_t ostrideH = gradOutput.stride(2); int64_t ostrideW = gradOutput.stride(3); int osizeH = gradOutput.size(-2); int osizeW = gradOutput.size(-1); // preserve channels_last stride on input tensor; if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) { gradInput.as_strided_( {sizeB, sizeC, isizeH, isizeW}, {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC}); } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; // Launch kernel on input tensor elements. Logic behind launch config: // input tensor size NCHW, strides NHWC; // Launch on: // N(C) -> grid.x (striding on C to reduce sh_mem usage) // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4); int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C); int grid_x = sizeB*kernel_stride_C; int grid_y = cuda::ATenCeilDiv(isizeW, block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(isizeH, block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] { hipLaunchKernelGGL(( adaptive_average_gradinput_nhwc<int32_t>), dim3(grid), dim3(block), (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t), at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput.data_ptr<scalar_t>(), gradOutput.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, ostrideB, ostrideC, ostrideH, ostrideW); } ); break; } case at::MemoryFormat::Contiguous: { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); int64_t sizeD = input.size(-3); int64_t isizeH = input.size(-2); int64_t isizeW = input.size(-1); int64_t osizeH = gradOutput.size(-2); int64_t osizeW = gradOutput.size(-1); int64_t grid_x = sizeD; if (input.ndimension() == 4) grid_x *= input.size(-4); //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] { scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = ::max((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomic_adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel hipLaunchKernelGGL(( adaptive_average_gradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } THCudaCheck(hipGetLastError()); } } // namespace Tensor& adaptive_avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size) { adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor adaptive_avg_pool2d_cuda( at::Tensor const& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor& adaptive_avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input) { gradInput.resize_as_(input); adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); return gradInput; } Tensor adaptive_avg_pool2d_backward_cuda( const Tensor& gradOutput, const Tensor& input) { auto gradInput = at::zeros_like(input); adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); return gradInput; } } // at::native } // at #undef BLOCK_STRIDE #undef CUDA_MAX_THREADS #undef START_IND #undef END_IND
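The launch-config comments in the two host functions above pick block_x from the channel count and block_y/block_z from the spatial output, each rounded down to a power of two and clamped by maxThreadsDim and the overall thread budget, with block_x re-clamped at the end. The host-only sketch below isolates that heuristic; it assumes lastPow2(n) returns the largest power of two not exceeding n (at least 1), hard-codes a warp size of 32, and uses illustrative names (pick_block) that are not ATen API.

// Minimal, host-only sketch of the block-sizing heuristic used above (illustrative only).
#include <algorithm>
#include <cstdio>

static int lastPow2(unsigned int n) {
  // smear the highest set bit downward, then isolate it
  n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16);
  return (int)std::max(1u, n - (n >> 1));
}

// C -> block.x (coalesced access), W -> block.y, H -> block.z,
// then block.x is re-clamped against the remaining thread budget.
static void pick_block(int sizeC, int osizeW, int osizeH,
                       const int maxThreadsDim[3], int max_threads, int block[3]) {
  int block_x = std::min(maxThreadsDim[0], std::min(lastPow2(sizeC), 32 /* warp size, assumed */));
  int block_y = std::min(maxThreadsDim[1], std::min(lastPow2(osizeW), max_threads / block_x));
  int block_z = std::min(maxThreadsDim[2], std::min(lastPow2(osizeH), max_threads / block_x / block_y));
  block_x = std::min(maxThreadsDim[0], std::min(lastPow2(sizeC), max_threads / block_y / block_z));
  block[0] = block_x; block[1] = block_y; block[2] = block_z;
}

int main() {
  const int maxThreadsDim[3] = {1024, 1024, 64};
  int block[3];
  pick_block(/*sizeC=*/64, /*osizeW=*/7, /*osizeH=*/7, maxThreadsDim, /*max_threads=*/512, block);
  std::printf("block = (%d, %d, %d)\n", block[0], block[1], block[2]);  // prints (32, 4, 4)
  return 0;
}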
9d975a6e75fe92298acbabbbcb41f0708bc8dd1b.cu
#include "ATen/ATen.h" #include "ATen/cuda/CUDAApplyUtils.cuh" #include "ATen/cuda/CUDAContext.h" #include "ATen/NativeFunctions.h" #include "ATen/TensorUtils.h" #include "ATen/Utils.h" #include "c10/util/Exception.h" #include <THC/THCGeneral.h> #include "THC/THCNumerics.cuh" #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> #define START_IND(a,b,c) (int)std::floor((float)(a * c) / b) #define END_IND(a,b,c) (int)std::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a,b,c) ((a * c) / b) #define END_IND_INT(a,b,c) (((a + 1) * c + b - 1) / b) // #define START_IND(a,b,c) a * c / b // #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0 #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched namespace at { namespace native { namespace { // 4d tensor B x D x H x W // All kernels view batch dim B and feature dim D as collapsed. /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output */ template <typename T> __global__ void adaptive_average_pool(T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; T sum = ScalarConvert<int, T>::to(0); int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { T val = ptr_input[iw*istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } /* * Description: * this function computes the gradInput from gradOutput */ template <typename T> __global__ void adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on input pixels int ih, iw; // select input/output plane based on thread/block ID int i_plane = blockIdx.x; int o_plane = i_plane; gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; int istartH = blockDim.y*blockIdx.y + threadIdx.y; int iendH = isizeH; int istepH = blockDim.y*gridDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // compute gradInput for(ih = istartH; ih < iendH; ih += istepH) { int ostartH = START_IND(ih, isizeH, osizeH); int oendH = END_IND(ih, isizeH, osizeH); for(iw = istartW; iw < iendW; iw += istepW) { int ostartW = START_IND(iw, isizeW, osizeW); int oendW = END_IND(iw, isizeW, osizeW); // Compute the gradients over corresponding output pixels T *ptr_gradInput = gradInput + ih*isizeW + iw; int oh, ow; for(oh = ostartH; oh < oendH; ++oh) { int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH); for(ow = ostartW; ow < oendW; ++ow) { int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW); T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW; *ptr_gradInput += grad_delta; } } } } } /* * Description: * this function computes the gradInput from gradOutput * (uses atomic add) */ template <typename T> __global__ void atomic_adaptive_average_gradinput( T *gradInput, T *gradOutput, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators on output indices int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; gradOutput = gradOutput + o_plane*osizeW*osizeH; gradInput = gradInput + i_plane*isizeW*isizeH; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = START_IND(oh, osizeH, isizeH); int iendH = END_IND(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = START_IND(ow, osizeW, isizeW); int iendW = END_IND(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients for over corresponding input pixels T *ptr_gradInput = gradInput + istartH*isizeW + istartW; T *ptr_gradOutput = gradOutput + oh*osizeW + ow; T grad_delta = *ptr_gradOutput / kW / kH; int ih, iw; for(ih = 0; ih < kH; ++ih) { for(iw = 0; iw < kW; ++iw) { // atomic add since different threads could update same variable atomicAdd(&(ptr_gradInput[iw]), grad_delta); } ptr_gradInput += isizeW; // next input line } } } } /* * Description: * this function adaptively average pools an input 4D tensor along dimensions 2 and 3 * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_pool_nhwc(const scalar_t* __restrict__ input, scalar_t* __restrict__ output, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t istrideB, index_t istrideC, index_t istrideH, index_t istrideW) { extern __shared__ int smem[]; scalar_t *out_cached = reinterpret_cast<scalar_t*>(smem); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; // each CTA handles a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. output = output + batch_id * osizeH * osizeW * sizeC; input = input + batch_id * istrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C * blockDim.x]; // iterate on output H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t oH = (osizeH + gridDim.z-1) / gridDim.z; index_t oW = (osizeW + gridDim.y-1) / gridDim.y; index_t ostartH = threadIdx.z + blockIdx.z*oH; index_t oendH = ::min(ostartH+oH, osizeH); index_t ostartW = threadIdx.y + blockIdx.y*oW; index_t oendW = ::min(ostartW+oW, osizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. 
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int istartH = START_IND_INT(oh, osizeH, isizeH); int iendH = END_IND_INT(oh, osizeH, isizeH); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int istartW = START_IND_INT(ow, osizeW, isizeW); int iendW = END_IND_INT(ow, osizeW, isizeW); scalar_t factor = scalar_t(1.0) / ((iendH-istartH) * (iendW-istartW)); // loop on input: hierarchy h->w->c, use shared memory here hopefully // would not stall global memory read; for (index_t ih = istartH; ih < iendH; ih++) { for (index_t iw = istartW; iw < iendW; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = input + ih*istrideH + iw*istrideW; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_input[c*istrideC]; cached_index += blockDim.x; } } } scalar_t *ptr_output = output + (oh * osizeW + ow) * sizeC; int cached_index = threadIdx.x; // write accumulated output to global memory; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { // This causes numerical issueptr when unit test with NCHW kernel; // switch to could verify the correctness; // output[c] = out_cached[c] / (iendH-istartH) / (iendW-istartW); ptr_output[c] = out_cached[cached_index] * factor; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. } } } /* * Description: * this function computes the gradInput from gradOutput * NHWC layout for both input and output tensor * 4D input, 4D output */ template <typename index_t, typename scalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void adaptive_average_gradinput_nhwc(scalar_t* __restrict__ gradInput, const scalar_t* __restrict__ gradOutput, int sizeB, int sizeC, int isizeH, int isizeW, int osizeH, int osizeW, int kernel_stride_C, int kernel_size_C, index_t ostrideB, index_t ostrideC, index_t ostrideH, index_t ostrideW) { extern __shared__ int smem[]; index_t *ostartW_cached = smem; index_t *oendW_cached = &ostartW_cached[isizeW]; // be careful with alignment, in case scalar_t is fp16, we want to assign // int pointers first. scalar_t *r_kW_cached = reinterpret_cast<scalar_t*>(&oendW_cached[isizeW]); scalar_t *r_kH_cached = &r_kW_cached[osizeW]; scalar_t *out_cached = &r_kH_cached[osizeH]; // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // Precompute output start/end index per input index on width dimension; // Not doing this for height dimension, as that's our out-most loop. for (index_t i = thread_id; i < isizeW; i+= block_size) { ostartW_cached[i] = START_IND_INT(i, isizeW, osizeW); oendW_cached[i] = END_IND_INT(i, isizeW, osizeW); } // Precompute pooling height/weight factor for each output element; // This is used to weight output gradient when accumulate them on input // gradient. // Technically we don't have to compute it for the whole `osizeH`, since // each cta only covers a consecutive portion of the entire output. But it's // not going to save us from code divergence, and shared memory save is not // an issue neither, so just leave it as is for now. 
for (index_t i = thread_id; i < osizeH; i+= block_size) { r_kH_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeH, isizeH) - START_IND_INT(i, osizeH, isizeH)); } for (index_t i = thread_id; i < osizeW; i+= block_size) { r_kW_cached[i] = scalar_t(1.0) / (END_IND_INT(i, osizeW, isizeW) - START_IND_INT(i, osizeW, isizeW)); } // each CTA handles a portion of a single slice on batch dimension; int batch_id = blockIdx.x % sizeB; int channel_id = blockIdx.x / sizeB; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (index_t i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = scalar_t(0.0); } __syncthreads(); // each CTA handles a portion of a single slice on batch dimension; // We use gridDim.x to handle striding on C as well. gradInput = gradInput + batch_id * isizeH * isizeW * sizeC; gradOutput = gradOutput + batch_id * ostrideB; // split out_cached and exclusively it assigned to each thread; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x * kernel_size_C]; // iterate on input H & W. // Each CTA handles a consecutive H & W section (TILE); Do NOT stride CTA on // tile so there's a better chance to hit L1 cache. index_t iH = (isizeH + gridDim.z-1) / gridDim.z; index_t iW = (isizeW + gridDim.y-1) / gridDim.y; index_t istartH = threadIdx.z + blockIdx.z*iH; index_t iendH = ::min(istartH+iH, isizeH); index_t istartW = threadIdx.y + blockIdx.y*iW; index_t iendW = ::min(istartW+iW, isizeW); // Stride for threads, each warp can reuse L1 as they go. So theoretically // better chance to survive cache eviction. for (index_t ih = istartH; ih < iendH; ih+=blockDim.z) { index_t ostartH = START_IND_INT(ih, isizeH, osizeH); index_t oendH = END_IND_INT(ih, isizeH, osizeH); for (index_t iw = istartW; iw < iendW; iw+=blockDim.y) { // loop on output: hierarchy h->w->c, so we could reuse weight factor f // because it remains the same for given oh & ow for(index_t oh = ostartH; oh < oendH; ++oh) { for(index_t ow = ostartW_cached[iw]; ow < oendW_cached[iw]; ++ow) { scalar_t f = r_kW_cached[ow] * r_kH_cached[oh]; const scalar_t* ptr_gradOutput = gradOutput + oh*ostrideH + ow*ostrideW; int cached_index = threadIdx.x; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { out_cached[cached_index] += ptr_gradOutput[c*ostrideC] * f; cached_index += blockDim.x; } } } scalar_t *ptr_gradInput = gradInput + (ih * isizeW + iw) * sizeC; int cached_index = threadIdx.x; // write accumulated gradIput to global memory; for (index_t c = threadIdx.x + channel_id*blockDim.x; c < sizeC; c += blockDim.x*kernel_stride_C) { ptr_gradInput[c] = out_cached[cached_index]; out_cached[cached_index] = scalar_t(0.0); cached_index += blockDim.x; } // no need to __syncthreads() since out_cached is not shared. 
} } } // 4d tensor B x D x H x W void adaptive_avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef output_size) { TensorArg input_arg{ input, "input", 1 }, output_arg{ output, "output", 2 }; checkAllSameGPU("cudnn_adaptive_avg_pooling2d", {input_arg, output_arg}); for (int64_t i = 0; i < input.ndimension(); i++) { TORCH_CHECK(input.size(i) > 0, "adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, " "but input has sizes ", input.sizes(), " with dimension ", i, " being " "empty"); } Tensor input_ = input; switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); int sizeB = input_.size(0); int sizeC = input_.size(1); int isizeH = input_.size(2); int isizeW = input_.size(3); int64_t istrideB = input_.stride(0); int64_t istrideC = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); int osizeH = output_size[0]; int osizeW = output_size[1]; // preserve channels_last stride on output tensor; if (!output.is_contiguous(at::MemoryFormat::ChannelsLast)) { // TODO: modify this after resize_ added `memory_format` tag output.resize_({sizeB, sizeC, osizeH, osizeW}).as_strided_({sizeB, sizeC, osizeH, osizeW}, {sizeC*osizeH*osizeW, 1, osizeW*sizeC, sizeC}); } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; // Launch kernel on output tensor elements. Logic behind launch config: // output tensor size NCHW, strides NHWC; // Launch on: // N -> grid.x // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4); int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C); int grid_x = sizeB*kernel_stride_C; int grid_y = cuda::ATenCeilDiv(osizeW, block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(osizeH, block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. 
// TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input_.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input_.scalar_type(), "adaptive_avg_pool2d_nhwc_cuda", [&] { adaptive_average_pool_nhwc<int32_t><<<grid, block, kernel_size_C * block_x * block_y * block_z * sizeof(scalar_t), at::cuda::getCurrentCUDAStream()>>> ( input_.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, istrideB, istrideC, istrideH, istrideW); } ); break; } case at::MemoryFormat::Contiguous: { TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); int64_t grid_x = input.size(-3); if (input.ndimension() == 4) { input_ = input.contiguous(); grid_x *= input_.size(-4); } int64_t sizeD = input_.size(-3); int64_t isizeH = input_.size(-2); int64_t isizeW = input_.size(-1); int64_t istrideD = input_.stride(-3); int64_t istrideH = input_.stride(-2); int64_t istrideW = input_.stride(-1); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 4) { output.resize_({input_.size(-4), sizeD, osizeH, osizeW}); } else { output.resize_({sizeD, osizeH, osizeW}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] { scalar_t *input_data = input_.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = std::max<int64_t>((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); // run averagepool kernel adaptive_average_pool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( input_data, output_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } THCudaCheck(cudaGetLastError()); } void adaptive_avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }, grad_output_arg{ gradOutput_, "gradOutput_", 2 }, input_arg{ input, "input", 3 }; checkAllSameGPU("cudnn_adaptive_avg_pooling2d_out", {grad_input_arg, grad_output_arg, input_arg}); switch (input.suggest_memory_format()) { case at::MemoryFormat::ChannelsLast: { // special case for tensor memory format in channels_last TORCH_CHECK(input.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); int sizeB = input.size(0); int sizeC = input.size(1); int isizeH = input.size(2); int isizeW = input.size(3); Tensor gradOutput = gradOutput_; int64_t ostrideB = gradOutput.stride(0); int64_t ostrideC = gradOutput.stride(1); int64_t ostrideH = gradOutput.stride(2); int64_t ostrideW = gradOutput.stride(3); int osizeH = gradOutput.size(-2); int osizeW = gradOutput.size(-1); // preserve channels_last stride on input tensor; if (!gradInput.is_contiguous(at::MemoryFormat::ChannelsLast)) { gradInput.as_strided_( {sizeB, sizeC, isizeH, isizeW}, {sizeC*isizeH*isizeW, 1, isizeW*sizeC, sizeC}); } const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; // Launch kernel on input tensor elements. 
Logic behind launch config: // input tensor size NCHW, strides NHWC; // Launch on: // N(C) -> grid.x (striding on C to reduce sh_mem usage) // H -> grid.z * block.z // W -> grid.y * block.y // C -> block.x // encourage larger block_y & block_z for better cache hit while maintain // reasonable block_x for coalesced memory access; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv(sizeC, block_x * 4); int kernel_size_C = cuda::ATenCeilDiv(sizeC, block_x * kernel_stride_C); int grid_x = sizeB*kernel_stride_C; int grid_y = cuda::ATenCeilDiv(isizeW, block_y*BLOCK_STRIDE); int grid_z = cuda::ATenCeilDiv(isizeH, block_z*BLOCK_STRIDE); const dim3 grid(grid_x, grid_y, grid_z); // we are dealing with packed tensor here. max index is the same as numel. // TODO: to really support input tensor large enought to go beyond int32, // we will need to restrict out shared memory usage and adjust the launch // config; AT_ASSERT(input.numel() < std::numeric_limits<int32_t>::max()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "adaptive_avg_pool2d_backward_nhwc_cuda", [&] { adaptive_average_gradinput_nhwc<int32_t><<<grid, block, (kernel_size_C * block_x * block_y * block_z + osizeH + osizeW) * sizeof(scalar_t) + 2 * isizeW * sizeof(int32_t), at::cuda::getCurrentCUDAStream()>>> ( gradInput.data_ptr<scalar_t>(), gradOutput.data_ptr<scalar_t>(), sizeB, sizeC, isizeH, isizeW, osizeH, osizeW, kernel_stride_C, kernel_size_C, ostrideB, ostrideC, ostrideH, ostrideW); } ); break; } case at::MemoryFormat::Contiguous: { bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); int64_t sizeD = input.size(-3); int64_t isizeH = input.size(-2); int64_t isizeW = input.size(-1); int64_t osizeH = gradOutput.size(-2); int64_t osizeW = gradOutput.size(-1); int64_t grid_x = sizeD; if (input.ndimension() == 4) grid_x *= input.size(-4); //bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] { scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); // cuda blocks & threads: int blocksH = std::max((int)(16L / sizeD), 1); dim3 blocks(grid_x, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomic_adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel adaptive_average_gradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, isizeH, isizeW, osizeH, osizeW); } } ); break; } default: TORCH_CHECK( false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } THCudaCheck(cudaGetLastError()); } } // namespace Tensor& adaptive_avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size) { adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor adaptive_avg_pool2d_cuda( at::Tensor const& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool2d_out_cuda_template( output, input, output_size); return output; } Tensor& adaptive_avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input) { gradInput.resize_as_(input); adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); return gradInput; } Tensor adaptive_avg_pool2d_backward_cuda( const Tensor& gradOutput, const Tensor& input) { auto gradInput = at::zeros_like(input); adaptive_avg_pool2d_backward_out_cuda_template( gradInput, gradOutput, input); return gradInput; } } // at::native } // at #undef BLOCK_STRIDE #undef CUDA_MAX_THREADS #undef START_IND #undef END_IND
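The START_IND/END_IND macros defined at the top of this file map output index o over osize bins of an isize axis to the half-open input range [floor(o*isize/osize), ceil((o+1)*isize/osize)). Below is a minimal host-only sketch of that index math applied to a 1-D adaptive average pool; start_ind, end_ind and adaptive_avg_pool1d are illustrative names, not ATen API.

#include <cmath>
#include <cstdio>
#include <vector>

// Same convention as the START_IND/END_IND macros above, written as functions.
static int start_ind(int o, int osize, int isize) { return (int)std::floor((float)(o * isize) / osize); }
static int end_ind(int o, int osize, int isize)   { return (int)std::ceil((float)((o + 1) * isize) / osize); }

// 1-D adaptive average pooling reference (illustrative only).
static std::vector<float> adaptive_avg_pool1d(const std::vector<float> &in, int osize) {
  const int isize = (int)in.size();
  std::vector<float> out(osize, 0.f);
  for (int o = 0; o < osize; ++o) {
    const int s = start_ind(o, osize, isize), e = end_ind(o, osize, isize);
    float sum = 0.f;
    for (int i = s; i < e; ++i) sum += in[i];
    out[o] = sum / (e - s);  // bins may overlap, as in the 2-D kernels above
  }
  return out;
}

int main() {
  std::vector<float> in = {1, 2, 3, 4, 5, 6, 7};
  for (float v : adaptive_avg_pool1d(in, 3)) std::printf("%g ", v);  // bins [0,3) [2,5) [4,7) -> 2 4 6
  std::printf("\n");
  return 0;
}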
6bf24d10a082c8f46f3ff4cee4397dfb6a381ae7.hip
// !!! This is a file automatically generated by hipify!!! // Source: http://web.mit.edu/pocky/www/cudaworkshop/MonteCarlo/Pi.cu // Written by Barry Wilkinson, UNC-Charlotte. Pi.cu December 22, 2010. //Derived somewhat from code developed by Patrick Rogers, UNC-C #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <hiprand/hiprand_kernel.h> #include <getopt.h> #define TRIALS_PER_THREAD 4096 #define BLOCKS 256 #define THREADS 256 #define PI 3.1415926535 // known value of pi __global__ void gpu_monte_carlo(float *estimate, hiprandState_t *states) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; float x, y; hiprand_init(1234, tid, 0, &states[tid]); // Initialize CURAND for(int i = 0; i < TRIALS_PER_THREAD; i++) { x = hiprand_uniform (&states[tid]); y = hiprand_uniform (&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // return estimate of pi } __global__ void gpu_monte_carlo_d(double *estimate, hiprandState_t *states) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; double x, y; hiprand_init(1234, tid, 0, &states[tid]); // Initialize CURAND for(int i = 0; i < TRIALS_PER_THREAD; i++) { x = hiprand_uniform (&states[tid]); y = hiprand_uniform (&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (double) TRIALS_PER_THREAD; // return estimate of pi } float host_monte_carlo(long trials) { float x, y; long points_in_circle = 0; for(long i = 0; i < trials; i++) { x = rand() / (float) RAND_MAX; y = rand() / (float) RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } return 4.0f * points_in_circle / trials; } double host_monte_carlo_d(long trials) { double x, y; long points_in_circle = 0; for(long i = 0; i < trials; i++) { x = rand() / (double) RAND_MAX; y = rand() / (double) RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } return 4.0f * points_in_circle / trials; } int main(int argc, char **argv) { int dp = 0; int c; while((c = getopt(argc, argv, "d")) != -1){ switch(c){ case 'd': dp = 1; break; default: dp = 0; break; } } clock_t start, stop; if(!dp){ printf("Run with single precision\n"); float host[BLOCKS * THREADS]; float *dev; hiprandState_t *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", TRIALS_PER_THREAD, BLOCKS, THREADS); start = clock(); hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // allocate device mem. 
for counts hipMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(hiprandState_t) ); hipLaunchKernelGGL(( gpu_monte_carlo), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates); hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), hipMemcpyDeviceToHost); // return results float pi_gpu = 0.0f; for(int i = 0; i < BLOCKS * THREADS; i++) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %lf s.\n", (double)(stop-start)/CLOCKS_PER_SEC); start = clock(); float pi_cpu = host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD); stop = clock(); printf("CPU pi calculated in %lf s.\n", (double)(stop-start)/CLOCKS_PER_SEC); printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI); }else{ printf("Run with double precision\n"); double host[BLOCKS * THREADS]; double *dev; hiprandState_t *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", TRIALS_PER_THREAD, BLOCKS, THREADS); start = clock(); hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(double)); // allocate device mem. for counts hipMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(hiprandState_t) ); hipLaunchKernelGGL(( gpu_monte_carlo_d), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates); hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(double), hipMemcpyDeviceToHost); // return results double pi_gpu = 0.0; for(int i = 0; i < BLOCKS * THREADS; i++) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %lf s.\n", (stop-start)/(double)CLOCKS_PER_SEC); start = clock(); double pi_cpu = host_monte_carlo_d(BLOCKS * THREADS * TRIALS_PER_THREAD); stop = clock(); printf("CPU pi calculated in %lf s.\n", (stop-start)/(double)CLOCKS_PER_SEC); printf("CUDA estimate of PI = %lf [error of %lf]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %lf [error of %lf]\n", pi_cpu, pi_cpu - PI); } return 0; }
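The averaging above copies every per-thread estimate back to the host and sums it in a loop (note the accumulator must start at zero). As a sketch of one possible alternative, not part of the original program, the per-thread estimates could instead be summed on the device with Thrust, which ships with both the CUDA toolkit and ROCm (rocThrust); average_estimates is an illustrative name and error handling is omitted.

#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

// dev_estimates: device array holding one pi estimate per thread (length n).
// Raw device pointers are accepted by thrust::reduce when an execution policy is supplied.
float average_estimates(const float *dev_estimates, int n) {
  float sum = thrust::reduce(thrust::device, dev_estimates, dev_estimates + n, 0.0f);
  return sum / n;
}

A call such as average_estimates(dev, BLOCKS * THREADS) would stand in for the device-to-host copy plus host loop shown above.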
6bf24d10a082c8f46f3ff4cee4397dfb6a381ae7.cu
// Source: http://web.mit.edu/pocky/www/cudaworkshop/MonteCarlo/Pi.cu // Written by Barry Wilkinson, UNC-Charlotte. Pi.cu December 22, 2010. //Derived somewhat from code developed by Patrick Rogers, UNC-C #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <math.h> #include <time.h> #include <curand_kernel.h> #include <getopt.h> #define TRIALS_PER_THREAD 4096 #define BLOCKS 256 #define THREADS 256 #define PI 3.1415926535 // known value of pi __global__ void gpu_monte_carlo(float *estimate, curandState *states) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; float x, y; curand_init(1234, tid, 0, &states[tid]); // Initialize CURAND for(int i = 0; i < TRIALS_PER_THREAD; i++) { x = curand_uniform (&states[tid]); y = curand_uniform (&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // return estimate of pi } __global__ void gpu_monte_carlo_d(double *estimate, curandState *states) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; double x, y; curand_init(1234, tid, 0, &states[tid]); // Initialize CURAND for(int i = 0; i < TRIALS_PER_THREAD; i++) { x = curand_uniform (&states[tid]); y = curand_uniform (&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (double) TRIALS_PER_THREAD; // return estimate of pi } float host_monte_carlo(long trials) { float x, y; long points_in_circle = 0; for(long i = 0; i < trials; i++) { x = rand() / (float) RAND_MAX; y = rand() / (float) RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } return 4.0f * points_in_circle / trials; } double host_monte_carlo_d(long trials) { double x, y; long points_in_circle = 0; for(long i = 0; i < trials; i++) { x = rand() / (double) RAND_MAX; y = rand() / (double) RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } return 4.0f * points_in_circle / trials; } int main(int argc, char **argv) { int dp = 0; int c; while((c = getopt(argc, argv, "d")) != -1){ switch(c){ case 'd': dp = 1; break; default: dp = 0; break; } } clock_t start, stop; if(!dp){ printf("Run with single precision\n"); float host[BLOCKS * THREADS]; float *dev; curandState *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", TRIALS_PER_THREAD, BLOCKS, THREADS); start = clock(); cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // allocate device mem. 
for counts cudaMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(curandState) ); gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev, devStates); cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost); // return results float pi_gpu = 0.0f; for(int i = 0; i < BLOCKS * THREADS; i++) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %lf s.\n", (double)(stop-start)/CLOCKS_PER_SEC); start = clock(); float pi_cpu = host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD); stop = clock(); printf("CPU pi calculated in %lf s.\n", (double)(stop-start)/CLOCKS_PER_SEC); printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI); }else{ printf("Run with double precision\n"); double host[BLOCKS * THREADS]; double *dev; curandState *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", TRIALS_PER_THREAD, BLOCKS, THREADS); start = clock(); cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(double)); // allocate device mem. for counts cudaMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(curandState) ); gpu_monte_carlo_d<<<BLOCKS, THREADS>>>(dev, devStates); cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(double), cudaMemcpyDeviceToHost); // return results double pi_gpu = 0.0; for(int i = 0; i < BLOCKS * THREADS; i++) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %lf s.\n", (stop-start)/(double)CLOCKS_PER_SEC); start = clock(); double pi_cpu = host_monte_carlo_d(BLOCKS * THREADS * TRIALS_PER_THREAD); stop = clock(); printf("CPU pi calculated in %lf s.\n", (stop-start)/(double)CLOCKS_PER_SEC); printf("CUDA estimate of PI = %lf [error of %lf]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %lf [error of %lf]\n", pi_cpu, pi_cpu - PI); } return 0; }
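clock() above times the whole allocate/launch/copy sequence on the host; that works because the final cudaMemcpy synchronizes, but it lumps GPU and host work together. A generic sketch of event-based GPU timing follows (a minimal illustration, not a change to the program above); time_gpu_ms is an illustrative name and return codes are not checked.

#include <cuda_runtime.h>

// Times whatever GPU work the supplied function enqueues on the default stream.
float time_gpu_ms(void (*enqueue)(void)) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);          // marker before the work
  enqueue();                       // e.g. a small wrapper that launches the kernel
  cudaEventRecord(stop);           // marker after the work
  cudaEventSynchronize(stop);      // wait until the recorded work has finished
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}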
5992478a931be666fc64c354fba44f7e3854e1a8.hip
// !!! This is a file automatically generated by hipify!!! #include <color_spinor_field.h> #include <tune_quda.h> #include <typeinfo> #include <launch_kernel.cuh> #include <jitify_helper.cuh> #include <kernels/restrictor.cuh> namespace quda { #ifdef GPU_MULTIGRID template <typename Float, typename vFloat, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread> class RestrictLaunch : public Tunable { protected: ColorSpinorField &out; const ColorSpinorField &in; const ColorSpinorField &v; const int *fine_to_coarse; const int *coarse_to_fine; const int parity; const QudaFieldLocation location; const int block_size; char vol[TuneKey::volume_n]; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. bool tuneAuxDim() const { return true; } // Do tune the aux dimensions. unsigned int minThreads() const { return in.VolumeCB(); } // fine parity is the block y dimension public: RestrictLaunch(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, const int *fine_to_coarse, const int *coarse_to_fine, int parity) : out(out), in(in), v(v), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine), parity(parity), location(checkLocation(out,in,v)), block_size(in.VolumeCB()/(2*out.VolumeCB())) { if (v.Location() == QUDA_CUDA_FIELD_LOCATION) { #ifdef JITIFY create_jitify_program("kernels/restrictor.cuh"); #endif } strcpy(aux, compile_type_str(in)); strcat(aux, out.AuxString()); strcat(aux, ","); strcat(aux, in.AuxString()); strcpy(vol, out.VolString()); strcat(vol, ","); strcat(vol, in.VolString()); } // block size is checkerboard fine length / full coarse length virtual ~RestrictLaunch() { } void apply(const hipStream_t &stream) { if (location == QUDA_CPU_FIELD_LOCATION) { if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) { RestrictArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER> arg(out, in, v, fine_to_coarse, coarse_to_fine, parity); Restrict<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread>(arg); } else { errorQuda("Unsupported field order %d", out.FieldOrder()); } } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) { typedef RestrictArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_FLOAT2_FIELD_ORDER> Arg; Arg arg(out, in, v, fine_to_coarse, coarse_to_fine, parity); arg.swizzle = tp.aux.x; #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::RestrictKernel") .instantiate((int)tp.block.x,Type<Float>(),fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Type<Arg>()) .configure(tp.grid,tp.block,tp.shared_bytes,stream).launch(arg); #else LAUNCH_KERNEL_MG_BLOCK_SIZE(RestrictKernel,tp,stream,arg,Float,fineSpin,fineColor, coarseSpin,coarseColor,coarse_colors_per_thread,Arg); #endif } else { errorQuda("Unsupported field order %d", out.FieldOrder()); } } } // This block tuning tunes for the optimal amount of color // splitting between blockDim.z and gridDim.z. However, enabling // blockDim.z > 1 gives incorrect results due to cub reductions // being unable to do independent sliced reductions along // blockDim.z. So for now we only split between colors per thread // and grid.z. 
bool advanceBlockDim(TuneParam &param) const { // let's try to advance spin/block-color while(param.block.z <= coarseColor/coarse_colors_per_thread) { param.block.z++; if ( (coarseColor/coarse_colors_per_thread) % param.block.z == 0) { param.grid.z = (coarseColor/coarse_colors_per_thread) / param.block.z; break; } } // we can advance spin/block-color since this is valid if (param.block.z <= (coarseColor/coarse_colors_per_thread) ) { // return true; } else { // we have run off the end so let's reset param.block.z = 1; param.grid.z = coarseColor/coarse_colors_per_thread; return false; } } int tuningIter() const { return 3; } bool advanceAux(TuneParam &param) const { #ifdef SWIZZLE if (param.aux.x < 2*deviceProp.multiProcessorCount) { param.aux.x++; return true; } else { param.aux.x = 1; return false; } #else return false; #endif } // only tune shared memory per thread (disable tuning for block.z for now) bool advanceTuneParam(TuneParam &param) const { return advanceSharedBytes(param) || advanceAux(param); } TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); } void initTuneParam(TuneParam &param) const { defaultTuneParam(param); } /** sets default values for when tuning is disabled */ void defaultTuneParam(TuneParam &param) const { param.block = dim3(block_size, in.SiteSubset(), 1); param.grid = dim3( (minThreads()+param.block.x-1) / param.block.x, 1, 1); param.shared_bytes = 0; param.block.z = 1; param.grid.z = coarseColor / coarse_colors_per_thread; param.aux.x = 1; // swizzle factor } long long flops() const { return 8 * fineSpin * fineColor * coarseColor * in.SiteSubset()*(long long)in.VolumeCB(); } long long bytes() const { size_t v_bytes = v.Bytes() / (v.SiteSubset() == in.SiteSubset() ? 1 : 2); return in.Bytes() + out.Bytes() + v_bytes + in.SiteSubset()*in.VolumeCB()*sizeof(int); } }; template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor> void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, const int *fine_to_coarse, const int *coarse_to_fine, int parity) { // for fine grids (Nc=3) have more parallelism so can use more coarse strategy constexpr int coarse_colors_per_thread = fineColor != 3 ? 2 : coarseColor >= 4 && coarseColor % 4 == 0 ? 4 : 2; //coarseColor >= 8 && coarseColor % 8 == 0 ? 8 : coarseColor >= 4 && coarseColor % 4 == 0 ? 
4 : 2; if (v.Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 RestrictLaunch<Float, short, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(out, in, v, fine_to_coarse, coarse_to_fine, parity); restrictor.apply(0); #else errorQuda("QUDA_PRECISION=%d does not enable half precision", QUDA_PRECISION); #endif } else if (v.Precision() == in.Precision()) { RestrictLaunch<Float, Float, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(out, in, v, fine_to_coarse, coarse_to_fine, parity); restrictor.apply(0); } else { errorQuda("Unsupported V precision %d", v.Precision()); } if (checkLocation(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template <typename Float, int fineSpin> void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, int nVec, const int *fine_to_coarse, const int *coarse_to_fine, const int * const * spin_map, int parity) { if (out.Nspin() != 2) errorQuda("Unsupported nSpin %d", out.Nspin()); const int coarseSpin = 2; // first check that the spin_map matches the spin_mapper spin_mapper<fineSpin,coarseSpin> mapper; for (int s=0; s<fineSpin; s++) for (int p=0; p<2; p++) if (mapper(s,p) != spin_map[s][p]) errorQuda("Spin map does not match spin_mapper"); // Template over fine color if (in.Ncolor() == 3) { // standard QCD const int fineColor = 3; if (nVec == 4) { Restrict<Float,fineSpin,fineColor,coarseSpin,4>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 6) { // free field Wilson Restrict<Float,fineSpin,fineColor,coarseSpin,6>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 24) { Restrict<Float,fineSpin,fineColor,coarseSpin,24>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 32) { Restrict<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else if (in.Ncolor() == 6) { // Coarsen coarsened Wilson free field const int fineColor = 6; if (nVec == 6) { Restrict<Float,fineSpin,fineColor,coarseSpin,6>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else if (in.Ncolor() == 24) { // to keep compilation under control coarse grids have same or more colors const int fineColor = 24; if (nVec == 24) { Restrict<Float,fineSpin,fineColor,coarseSpin,24>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 32) { Restrict<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else if (in.Ncolor() == 32) { const int fineColor = 32; if (nVec == 32) { Restrict<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else { errorQuda("Unsupported nColor %d", in.Ncolor()); } } template <typename Float> void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int * const * spin_map, int parity) { if (in.Nspin() == 2) { Restrict<Float,2>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); #ifdef GPU_WILSON_DIRAC } else if (in.Nspin() == 4) { Restrict<Float,4>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); #endif #if GPU_STAGGERED_DIRAC } else if (in.Nspin() == 1) { Restrict<Float,1>(out, in, v, Nvec, fine_to_coarse, 
coarse_to_fine, spin_map, parity); #endif } else { errorQuda("Unsupported nSpin %d", in.Nspin()); } } #endif // GPU_MULTIGRID void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int * const * spin_map, int parity) { #ifdef GPU_MULTIGRID if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder()) errorQuda("Field orders do not match (out=%d, in=%d, v=%d)", out.FieldOrder(), in.FieldOrder(), v.FieldOrder()); QudaPrecision precision = checkPrecision(out, in); if (precision == QUDA_DOUBLE_PRECISION) { #ifdef GPU_MULTIGRID_DOUBLE Restrict<double>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); #else errorQuda("Double precision multigrid has not been enabled"); #endif } else if (precision == QUDA_SINGLE_PRECISION) { Restrict<float>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); } else { errorQuda("Unsupported precision %d", out.Precision()); } #else errorQuda("Multigrid has not been built"); #endif } } // namespace quda
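The Restrict entry points above dispatch over precision, spin and color to a kernel that aggregates fine-grid degrees of freedom into coarse ones through the fine_to_coarse map and the near-null vectors v. The fragment below is only a schematic, scalar CPU sketch of that block-restriction idea; it ignores spin/color structure, complex arithmetic and QUDA's field layout, and restrict_scalar is an illustrative name.

#include <cstddef>
#include <vector>

// Each fine site i contributes v[i] * fine[i] (real scalars here) to the
// coarse site it is mapped to by fine_to_coarse[i].
std::vector<double> restrict_scalar(const std::vector<double> &fine,
                                    const std::vector<double> &v,
                                    const std::vector<int> &fine_to_coarse,
                                    int n_coarse) {
  std::vector<double> coarse(n_coarse, 0.0);
  for (std::size_t i = 0; i < fine.size(); ++i)
    coarse[fine_to_coarse[i]] += v[i] * fine[i];
  return coarse;
}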
5992478a931be666fc64c354fba44f7e3854e1a8.cu
#include <color_spinor_field.h> #include <tune_quda.h> #include <typeinfo> #include <launch_kernel.cuh> #include <jitify_helper.cuh> #include <kernels/restrictor.cuh> namespace quda { #ifdef GPU_MULTIGRID template <typename Float, typename vFloat, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread> class RestrictLaunch : public Tunable { protected: ColorSpinorField &out; const ColorSpinorField &in; const ColorSpinorField &v; const int *fine_to_coarse; const int *coarse_to_fine; const int parity; const QudaFieldLocation location; const int block_size; char vol[TuneKey::volume_n]; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. bool tuneAuxDim() const { return true; } // Do tune the aux dimensions. unsigned int minThreads() const { return in.VolumeCB(); } // fine parity is the block y dimension public: RestrictLaunch(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, const int *fine_to_coarse, const int *coarse_to_fine, int parity) : out(out), in(in), v(v), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine), parity(parity), location(checkLocation(out,in,v)), block_size(in.VolumeCB()/(2*out.VolumeCB())) { if (v.Location() == QUDA_CUDA_FIELD_LOCATION) { #ifdef JITIFY create_jitify_program("kernels/restrictor.cuh"); #endif } strcpy(aux, compile_type_str(in)); strcat(aux, out.AuxString()); strcat(aux, ","); strcat(aux, in.AuxString()); strcpy(vol, out.VolString()); strcat(vol, ","); strcat(vol, in.VolString()); } // block size is checkerboard fine length / full coarse length virtual ~RestrictLaunch() { } void apply(const cudaStream_t &stream) { if (location == QUDA_CPU_FIELD_LOCATION) { if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) { RestrictArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER> arg(out, in, v, fine_to_coarse, coarse_to_fine, parity); Restrict<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread>(arg); } else { errorQuda("Unsupported field order %d", out.FieldOrder()); } } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) { typedef RestrictArg<Float,vFloat,fineSpin,fineColor,coarseSpin,coarseColor,QUDA_FLOAT2_FIELD_ORDER> Arg; Arg arg(out, in, v, fine_to_coarse, coarse_to_fine, parity); arg.swizzle = tp.aux.x; #ifdef JITIFY using namespace jitify::reflection; jitify_error = program->kernel("quda::RestrictKernel") .instantiate((int)tp.block.x,Type<Float>(),fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Type<Arg>()) .configure(tp.grid,tp.block,tp.shared_bytes,stream).launch(arg); #else LAUNCH_KERNEL_MG_BLOCK_SIZE(RestrictKernel,tp,stream,arg,Float,fineSpin,fineColor, coarseSpin,coarseColor,coarse_colors_per_thread,Arg); #endif } else { errorQuda("Unsupported field order %d", out.FieldOrder()); } } } // This block tuning tunes for the optimal amount of color // splitting between blockDim.z and gridDim.z. However, enabling // blockDim.z > 1 gives incorrect results due to cub reductions // being unable to do independent sliced reductions along // blockDim.z. So for now we only split between colors per thread // and grid.z. 
bool advanceBlockDim(TuneParam &param) const { // let's try to advance spin/block-color while(param.block.z <= coarseColor/coarse_colors_per_thread) { param.block.z++; if ( (coarseColor/coarse_colors_per_thread) % param.block.z == 0) { param.grid.z = (coarseColor/coarse_colors_per_thread) / param.block.z; break; } } // we can advance spin/block-color since this is valid if (param.block.z <= (coarseColor/coarse_colors_per_thread) ) { // return true; } else { // we have run off the end so let's reset param.block.z = 1; param.grid.z = coarseColor/coarse_colors_per_thread; return false; } } int tuningIter() const { return 3; } bool advanceAux(TuneParam &param) const { #ifdef SWIZZLE if (param.aux.x < 2*deviceProp.multiProcessorCount) { param.aux.x++; return true; } else { param.aux.x = 1; return false; } #else return false; #endif } // only tune shared memory per thread (disable tuning for block.z for now) bool advanceTuneParam(TuneParam &param) const { return advanceSharedBytes(param) || advanceAux(param); } TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); } void initTuneParam(TuneParam &param) const { defaultTuneParam(param); } /** sets default values for when tuning is disabled */ void defaultTuneParam(TuneParam &param) const { param.block = dim3(block_size, in.SiteSubset(), 1); param.grid = dim3( (minThreads()+param.block.x-1) / param.block.x, 1, 1); param.shared_bytes = 0; param.block.z = 1; param.grid.z = coarseColor / coarse_colors_per_thread; param.aux.x = 1; // swizzle factor } long long flops() const { return 8 * fineSpin * fineColor * coarseColor * in.SiteSubset()*(long long)in.VolumeCB(); } long long bytes() const { size_t v_bytes = v.Bytes() / (v.SiteSubset() == in.SiteSubset() ? 1 : 2); return in.Bytes() + out.Bytes() + v_bytes + in.SiteSubset()*in.VolumeCB()*sizeof(int); } }; template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor> void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, const int *fine_to_coarse, const int *coarse_to_fine, int parity) { // for fine grids (Nc=3) have more parallelism so can use more coarse strategy constexpr int coarse_colors_per_thread = fineColor != 3 ? 2 : coarseColor >= 4 && coarseColor % 4 == 0 ? 4 : 2; //coarseColor >= 8 && coarseColor % 8 == 0 ? 8 : coarseColor >= 4 && coarseColor % 4 == 0 ? 
4 : 2; if (v.Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 RestrictLaunch<Float, short, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(out, in, v, fine_to_coarse, coarse_to_fine, parity); restrictor.apply(0); #else errorQuda("QUDA_PRECISION=%d does not enable half precision", QUDA_PRECISION); #endif } else if (v.Precision() == in.Precision()) { RestrictLaunch<Float, Float, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(out, in, v, fine_to_coarse, coarse_to_fine, parity); restrictor.apply(0); } else { errorQuda("Unsupported V precision %d", v.Precision()); } if (checkLocation(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template <typename Float, int fineSpin> void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, int nVec, const int *fine_to_coarse, const int *coarse_to_fine, const int * const * spin_map, int parity) { if (out.Nspin() != 2) errorQuda("Unsupported nSpin %d", out.Nspin()); const int coarseSpin = 2; // first check that the spin_map matches the spin_mapper spin_mapper<fineSpin,coarseSpin> mapper; for (int s=0; s<fineSpin; s++) for (int p=0; p<2; p++) if (mapper(s,p) != spin_map[s][p]) errorQuda("Spin map does not match spin_mapper"); // Template over fine color if (in.Ncolor() == 3) { // standard QCD const int fineColor = 3; if (nVec == 4) { Restrict<Float,fineSpin,fineColor,coarseSpin,4>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 6) { // free field Wilson Restrict<Float,fineSpin,fineColor,coarseSpin,6>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 24) { Restrict<Float,fineSpin,fineColor,coarseSpin,24>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 32) { Restrict<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else if (in.Ncolor() == 6) { // Coarsen coarsened Wilson free field const int fineColor = 6; if (nVec == 6) { Restrict<Float,fineSpin,fineColor,coarseSpin,6>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else if (in.Ncolor() == 24) { // to keep compilation under control coarse grids have same or more colors const int fineColor = 24; if (nVec == 24) { Restrict<Float,fineSpin,fineColor,coarseSpin,24>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else if (nVec == 32) { Restrict<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else if (in.Ncolor() == 32) { const int fineColor = 32; if (nVec == 32) { Restrict<Float,fineSpin,fineColor,coarseSpin,32>(out, in, v, fine_to_coarse, coarse_to_fine, parity); } else { errorQuda("Unsupported nVec %d", nVec); } } else { errorQuda("Unsupported nColor %d", in.Ncolor()); } } template <typename Float> void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int * const * spin_map, int parity) { if (in.Nspin() == 2) { Restrict<Float,2>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); #ifdef GPU_WILSON_DIRAC } else if (in.Nspin() == 4) { Restrict<Float,4>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); #endif #if GPU_STAGGERED_DIRAC } else if (in.Nspin() == 1) { Restrict<Float,1>(out, in, v, Nvec, fine_to_coarse, 
coarse_to_fine, spin_map, parity); #endif } else { errorQuda("Unsupported nSpin %d", in.Nspin()); } } #endif // GPU_MULTIGRID void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v, int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int * const * spin_map, int parity) { #ifdef GPU_MULTIGRID if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder()) errorQuda("Field orders do not match (out=%d, in=%d, v=%d)", out.FieldOrder(), in.FieldOrder(), v.FieldOrder()); QudaPrecision precision = checkPrecision(out, in); if (precision == QUDA_DOUBLE_PRECISION) { #ifdef GPU_MULTIGRID_DOUBLE Restrict<double>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); #else errorQuda("Double precision multigrid has not been enabled"); #endif } else if (precision == QUDA_SINGLE_PRECISION) { Restrict<float>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map, parity); } else { errorQuda("Unsupported precision %d", out.Precision()); } #else errorQuda("Multigrid has not been built"); #endif } } // namespace quda
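// A minimal, stand-alone sketch of the run-time -> compile-time dispatch
// pattern the Restrict() wrappers above use: each if/else branch promotes a
// run-time value (nVec, Ncolor, precision) to a template argument so the
// kernel body is specialised at compile time. The names dispatch, doWork and
// runtime_n are hypothetical and appear nowhere in the QUDA sources above.
template <int N> void doWork() { /* body compiled separately for each N */ }

void dispatch(int runtime_n) {
  if      (runtime_n == 24) doWork<24>();   // mirrors the nVec == 24 branch
  else if (runtime_n == 32) doWork<32>();   // mirrors the nVec == 32 branch
  else { /* report "Unsupported nVec" as the wrappers above do */ }
}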
c38bc11e1757c2045a6632b6315a101518fcfc05.hip
// !!! This is a file automatically generated by hipify!!! ///////////////////////////////// // NGUYEN Ba Diep 13/03/2017 // // [email protected] // // class simpleGL.cu // ///////////////////////////////// /* */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <iostream> // OpenGL Graphics includes #include <GL/glew.h> #include <GL/freeglut.h> #include <timer.h> // timing functions #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <helper_cuda_gl.h> // helper functions for CUDA/GL interop #include <hip/hip_vector_types.h> //#include "kernel.hip" #include "defines.h" #include <cassert> #define REFRESH_DELAY 200 //ms //200 :slow 10: very fast prefer:20 int MAXX=128; int MAXY=32; int MAXZ=128; #define SIZE 32 /* **************** global variable for fluid dinamic 2,147,483,648 ***************** #define DT 0.09f // Delta T for interative solver #define VIS 0.0025f // Viscosity constant //do nhot #define FORCE (5.8f*DIM) // Force scale factor #define FR 4 // Force update radius **************************************************************/ #define IX(i,j,k) ((i)+(M+2)*(j) + (M+2)*(N+2)*(k)) #define SWAP(x0,x) {float * tmp=x0;x0=x;x=tmp;} #define MAX(a,b) (((a) > (b)) ? (a) : (b)) #define LINEARSOLVERTIMES 10 bool drawVec = false; //fluid field information int M = SIZE; // grid x int N = SIZE; // grid y int O = SIZE; // grid z float dt = 0.4f; // time delta float diff = 0.0f; // diffuse float visc = 0.0f; // viscosity float force = 10.0f; // added on keypress on an axis float source = 200.0f; // density float source_alpha = 0.05; //for displaying density int addforce[3] = {0, 0, 0}; float * u, * v, *w, * u_prev, * v_prev, * w_prev; float * dens, * dens_prev; void add_source ( int M, int N, int O, float * x, float * s, float dt ) { int i, size=(M+2)*(N+2)*(O+2); for ( i=0 ; i<size ; i++ ) x[i] += dt*s[i]; } void lin_solve ( int M, int N, int O, int b, float * x, float * x0, float a, float c ) { int i, j, k, l; // iterate the solver for ( l=0 ; l<LINEARSOLVERTIMES ; l++ ) { // update for each cell for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { x[IX(i,j,k)] = (x0[IX(i,j,k)] + a*(x[IX(i-1,j,k)]+x[IX(i+1,j,k)]+x[IX(i,j-1,k)]+x[IX(i,j+1,k)]+x[IX(i,j,k-1)]+x[IX(i,j,k+1)]))/c; }}} } } void diffuse ( int M, int N, int O, int b, float * x, float * x0, float diff, float dt ) { int max = MAX(MAX(M, N), MAX(N, O)); float a=dt*diff*max*max*max; lin_solve ( M, N, O, b, x, x0, a, 1+6*a ); } void advect ( int M, int N, int O, int b, float * d, float * d0, float * u, float * v, float * w, float dt ) { int i, j, k, i0, j0, k0, i1, j1, k1; float x, y, z, s0, t0, s1, t1, u1, u0, dtx,dty,dtz; dtx=dty=dtz=dt*MAX(MAX(M, N), MAX(N, O)); for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { x = i-dtx*u[IX(i,j,k)]; y = j-dty*v[IX(i,j,k)]; z = k-dtz*w[IX(i,j,k)]; if (x<0.5f) x=0.5f; if (x>M+0.5f) x=M+0.5f; i0=(int)x; i1=i0+1; if (y<0.5f) y=0.5f; if (y>N+0.5f) y=N+0.5f; j0=(int)y; j1=j0+1; if (z<0.5f) z=0.5f; if (z>O+0.5f) z=O+0.5f; k0=(int)z; k1=k0+1; s1 = x-i0; s0 = 1-s1; t1 = y-j0; t0 = 1-t1; u1 = z-k0; u0 = 1-u1; d[IX(i,j,k)] = s0*(t0*u0*d0[IX(i0,j0,k0)]+t1*u0*d0[IX(i0,j1,k0)]+t0*u1*d0[IX(i0,j0,k1)]+t1*u1*d0[IX(i0,j1,k1)])+ 
s1*(t0*u0*d0[IX(i1,j0,k0)]+t1*u0*d0[IX(i1,j1,k0)]+t0*u1*d0[IX(i1,j0,k1)]+t1*u1*d0[IX(i1,j1,k1)]); }}} } void project ( int M, int N, int O, float * u, float * v, float * w, float * p, float * div ) { int i, j, k; for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { div[IX(i,j,k)] = -1.0/3.0*((u[IX(i+1,j,k)]-u[IX(i-1,j,k)])/M+(v[IX(i,j+1,k)]-v[IX(i,j-1,k)])/M+(w[IX(i,j,k+1)]-w[IX(i,j,k-1)])/M); p[IX(i,j,k)] = 0; }}} lin_solve ( M, N, O, 0, p, div, 1, 6 ); for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { u[IX(i,j,k)] -= 0.5f*M*(p[IX(i+1,j,k)]-p[IX(i-1,j,k)]); v[IX(i,j,k)] -= 0.5f*M*(p[IX(i,j+1,k)]-p[IX(i,j-1,k)]); w[IX(i,j,k)] -= 0.5f*M*(p[IX(i,j,k+1)]-p[IX(i,j,k-1)]); }}} } void dens_step ( int M, int N, int O, float * x, float * x0, float * u, float * v, float * w, float diff, float dt ) { add_source ( M, N, O, x, x0, dt ); SWAP ( x0, x ); diffuse ( M, N, O, 0, x, x0, diff, dt ); SWAP ( x0, x ); advect ( M, N, O, 0, x, x0, u, v, w, dt ); } void vel_step ( int M, int N, int O, float * u, float * v, float * w, float * u0, float * v0, float * w0, float visc, float dt ) { add_source ( M, N, O, u, u0, dt ); add_source ( M, N, O, v, v0, dt );add_source ( M, N, O, w, w0, dt ); SWAP ( u0, u ); diffuse ( M, N, O, 1, u, u0, visc, dt ); SWAP ( v0, v ); diffuse ( M, N, O, 2, v, v0, visc, dt ); SWAP ( w0, w ); diffuse ( M, N, O, 3, w, w0, visc, dt ); project ( M, N, O, u, v, w, u0, v0 ); SWAP ( u0, u ); SWAP ( v0, v );SWAP ( w0, w ); advect ( M, N, O, 1, u, u0, u0, v0, w0, dt ); advect ( M, N, O, 2, v, v0, u0, v0, w0, dt );advect ( M, N, O, 3, w, w0, u0, v0, w0, dt ); project ( M, N, O, u, v, w, u0, v0 ); } static int allocate_fluiddata (){ int size = (M+2)*(N+2)*(O+2); u = (float *) malloc ( size*sizeof(float) ); v = (float *) malloc ( size*sizeof(float) ); w = (float *) malloc ( size*sizeof(float) ); u_prev = (float *) malloc ( size*sizeof(float) ); v_prev = (float *) malloc ( size*sizeof(float) ); w_prev = (float *) malloc ( size*sizeof(float) ); dens = (float *) malloc ( size*sizeof(float) ); dens_prev = (float *) malloc ( size*sizeof(float) ); if ( !u || !v || !w || !u_prev || !v_prev || !w_prev || !dens || !dens_prev ) { fprintf ( stderr, "cannot allocate data\n" ); return ( 0 ); } return ( 1 ); } static void get_force( float * d, float * u, float * v, float * w ){ int i, j, k, size=(M+2)*(N+2)*(O+2);; for ( i=0 ; i<size ; i++ ) { u[i] = v[i] = w[i]= d[i] = 0.0f; } if(addforce[0]==1) // x { i=2, j=N/2; k=O/2; if ( i<1 || i>M || j<1 || j>N || k <1 || k>O) return; u[IX(i,j,k)] = force*10; addforce[0] = 0; } if(addforce[1]==1) { i=M/2, j=2; k=O/2; if ( i<1 || i>M || j<1 || j>N || k <1 || k>O) return; v[IX(i,j,k)] = force*10; addforce[1] = 0; } if(addforce[2]==1) // y { i=M/2, j=N/2; k=2; if ( i<1 || i>M || j<1 || j>N || k <1 || k>O) return; w[IX(i,j,k)] = force*10; addforce[2] = 0; } return; } bool show3D = true ; float g_fAnim = 0.0; float g_fAnimInc = 0.01f; bool animFlag = true; int runControl = 1; // Auto-Verification Code int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling float avgFPS = 0.0f; unsigned int frameCount = 0; GLint gFramesPerSecond = 0; static GLint Frames = 0; // frames averaged over 1000mS static GLuint Clock; // [milliSeconds] static GLuint PreviousClock = 0; // [milliSeconds] static GLuint NextClock = 0; // [milliSeconds] void FPS(void) { ++Frames; Clock = glutGet(GLUT_ELAPSED_TIME); //has limited resolution, so average over 1000mS if ( Clock < NextClock ) return; gFramesPerSecond = 
Frames/1; // store the averaged number of frames per second PreviousClock = Clock; NextClock = Clock+1000; // 1000mS=1S in the future Frames=0; } void timerMeasure(int value) { const int desiredFPS=100; glutTimerFunc(1000/desiredFPS, timerMeasure, ++value); //put your specific idle code here //... this code will run at desiredFPS // printf("numframe:%d ",Frames); //end your specific idle code here FPS(); //only call once per frame loop to measure FPS glutPostRedisplay(); } // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? a : b) // vbo variables GLuint vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; GLuint float_vbo; float4 *floatPos; /* struct cudaGraphicsResource *float_vbo_cuda_resource; void *d_float_vbo_buffer = NULL; */ ////////////////// bien toan cuc luu tru thong tin // /**************************************** ************ Cell & Float **************** *************************************/ bool showFloat = true; int num_floats = 4; FloatType *AllFloats_host = NULL; FloatType *AllFloats_device; bool showCell = true; CellType *AllCells_host = NULL; CellType *AllCells_device; Index *cell_index_host = NULL; Index *cell_index_device; float4 *surfacePos; GLuint surfaceVBO; bool showSurface = true; float *floatcolorred; float *floatcolorgreen; float *floatcolorblue; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void cleanup(); // GL functionality bool initGL(int *argc, char **argv); void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res); extern __global__ void game_of_life_kernel(float4 *pos, unsigned int maxx,unsigned int maxy, unsigned int maxz, int CAMode, CellType *Cells_device,Index * index_device,bool showMode); extern __global__ void main_simulation(float4 *pos, unsigned int maxx,unsigned int maxy, unsigned int maxz,int CAMode, CellType *Cells_device,Index * index_device); // rendering callbacks void display(); void keyboard(unsigned char key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); void timerEvent(int value); void computeFPS(); // Cuda functionality void runCuda(struct cudaGraphicsResource **vbo_resource,int modeCA,CellType *cells_d,Index * index_device); void initCell2D(int CAMode){ long tempid = 0; int num_inactive = 0; for(int j = 0; j < MAXY; ++j){ for(int i = 0; i < MAXX; ++i) { Position temp; temp.x = (float)i/MAXX ; temp.y = (float)j/MAXY ; temp.z = 0.5f; unsigned long long int index = i+MAXY*j; AllCells_host[index].id = tempid; AllCells_host[index].CellPos = temp; int state = rand() % 100 ; // cout << " state = " <<state; if (state %4 ==0) { //Diep random init AllCells_host[index].state = NORMAL ; // cout << " \n NORMAL id = " <<tempid; }else { AllCells_host[index].state = INACTIVE ; // cout << " \n INACTIVE id = " <<tempid; num_inactive ++; } tempid++; } } // cout << " tempid = " <<tempid; if(CAMode==CA_VON_NEUMANN){ //4 neighbor 2D vector<long> neighbor ; for(int j = 0; j < MAXY; ++j){ for(int i = 0; i < MAXX; ++i) { unsigned long long int index = i+MAXY*j; long tempindex[NUM_NEIGHBOR]; if (i>0){//left(x) = (x - 1) % M tempindex[0] = AllCells_host[index-1].id ; }else { tempindex[0] = INVALID_ID ; } if (i<MAXX-1){//right(x) = (x + 1) % M 
tempindex[1] = AllCells_host[index+1].id ; }else{ tempindex[1] = INVALID_ID ; } if (j>0){//above(x) = (x - M) % (M * N) tempindex[2] = AllCells_host[index-MAXX].id ; }else { tempindex[2] = INVALID_ID ; } if (j<MAXY-1){//below(x) = (x + M) % (M * N) tempindex[3] = AllCells_host[index+MAXX].id ; }else { tempindex[3] = INVALID_ID ; } memcpy(cell_index_host[index].id, tempindex, NUM_NEIGHBOR * sizeof(long)); //CA Diep change size // cell_index_host[i+(j*MAXX)].id = tempindex; /* if(i==2&&j==0){ cout << "\n i+j*MAXX= " << i+j*MAXX << " AllCells id= " <<AllCells[(i+j*MAXX)].id << " neightbors: " << AllCells[((i+j*MAXX)-1)%MAXX].id <<","<< AllCells[((i+j*MAXX)+1)%MAXX].id <<"," << AllCells[((i+j*MAXX)-MAXX)%(MAXX*MAXY)].id <<","<< AllCells[((i+j*MAXX)+MAXX)%(MAXX*MAXY)].id; }*/ } } } // printf("\n done initCell maxid = %d , inactive=%d ",tempid,num_inactive); } void initCell3D(int CAMode){ long tempid = 0; int num_inactive = 0; for(int k=0;k<MAXZ;k++){ for(int j = 0; j < MAXY; j++){ for(int i = 0; i < MAXX; i++) { unsigned long long int index = i+MAXZ*(j+MAXY*k); Position temp; temp.x = (float)i/MAXX ; temp.y = (float)j/MAXY ; temp.z = (float)k/MAXZ ; AllCells_host[index].id = tempid; AllCells_host[index].CellPos = temp; AllCells_host[index].temperature = (rand() % 1000 )/100 ; int state = rand() % 200 ; // cout << " \ni+MAXZ*(j+MAXY*k)= " <<i+MAXZ*(j+MAXY*k) << " tempid="<<tempid; if (state %13 ==0) { //Diep random init AllCells_host[index].state = NORMAL ; // cout << " \n NORMAL id = " <<tempid; }else { AllCells_host[index].state = INACTIVE ; // cout << " \n INACTIVE id = " <<tempid; num_inactive ++; } tempid++; //Flat[x + HEIGHT* (y + WIDTH* z)] // The algorithm is mostly the same. If you have a 3D array Original[HEIGHT, WIDTH, DEPTH] then you could turn it into Flat[HEIGHT * WIDTH * DEPTH] by // Flat[x + WIDTH * (y + DEPTH * z)] = Original[x, y, z] }//end for i MAXX }//end for j MAXY }//end for k MAXZ // cout << " tempid = " <<tempid; if(CAMode==CA_VON_NEUMANN){ //6 neighbor 3D for (int k=0; k<MAXZ;k++){ for(int j = 0; j < MAXY; j++){ for(int i = 0; i < MAXX; i++){ long tempindex[NUM_NEIGHBOR]; if (i>0){//left(x) = (x - 1) % M tempindex[0] = AllCells_host[((i+MAXZ*(j+MAXY*k))-1)].id ; }else { tempindex[0] = INVALID_ID ; } if (i<MAXX-1){//right(x) = (x + 1) % M tempindex[1] = AllCells_host[((i+MAXZ*(j+MAXY*k))+1)].id ; }else{ tempindex[1] = INVALID_ID ; } if (j>0){//above(x) = (x - M) % (M * N) tempindex[2] = AllCells_host[((i+MAXZ*(j-1+MAXY*k)))].id ; }else { tempindex[2] = INVALID_ID ; } if (j<MAXY-1){//below(x) = (x + M) % (M * N) tempindex[3] = AllCells_host[((i+MAXZ*(j+1+MAXY*k)))].id ; }else { tempindex[3] = INVALID_ID ; } if (k>0){//behind (x) = (x - M) % (M * N) tempindex[4] = AllCells_host[(i+MAXZ*(j+MAXY*(k-1)))].id ; }else { tempindex[4] = INVALID_ID ; } if (k<MAXZ-1){//front (x) = (x + M) % (M * N) tempindex[5] = AllCells_host[(i+MAXZ*(j+MAXY*(k+1)))].id ; }else { tempindex[5] = INVALID_ID ; } memcpy(cell_index_host[i+MAXZ*(j+MAXY*k)].id, tempindex, NUM_NEIGHBOR * sizeof(long)); //CA Diep change size // cell_index_host[i+(j*MAXX)].id = tempindex; // if(i==0&&j==1&&k==1){ /* cout <<"\n "<<k<<j<<i <<"|i+MAXZ*(j+MAXY*k)= " << i+MAXZ*(j+MAXY*k) << " AllCells id= " <<AllCells_host[i+MAXZ*(j+MAXY*k)].id << " \n neightbors: "; for (int de=0;de<NUM_NEIGHBOR;de++){ cout << de << ":"<< tempindex[de]<< " |" ; }*/ // } /* if(i==1&&j==1&&k==1){ cout << "\ni+MAXZ*(j+MAXY*k)= " << i+i+MAXZ*(j+MAXY*k) << " AllCells id= " <<AllCells_host[i+MAXZ*(j+MAXY*k)].id << " \n neightbors: "; for (int 
de=0;de<NUM_NEIGHBOR;de++){ cout << de << ":"<< tempindex[de]<< " |" ; } } */ }//end for i MAXX }//end for j MAXY }//end for k MAXZ }//end if CA Mode // printf("\n done initCell maxid = %d , inactive=%d ",tempid,num_inactive); } void initFloat(){ floatcolorred = (float *)malloc(num_floats*sizeof(float)); floatcolorgreen =(float *)malloc(num_floats*sizeof(float)); floatcolorblue = (float *)malloc(num_floats*sizeof(float)); for (int k=0;k<num_floats; k++){ FloatType tempfloattype; tempfloattype.trajectory = (FloatTrajectoryPoint*)malloc (MAX_TRAJECTORY_SIZE *sizeof(FloatTrajectoryPoint)); tempfloattype.trajectory_size = MAX_TRAJECTORY_SIZE ; for(int j = 0; j < MAX_TRAJECTORY_SIZE; ++j){ FloatTrajectoryPoint temppoint; temppoint.measure = (FloatMeasurement *) malloc (MAX_MEASURE_SIZE*sizeof(FloatMeasurement)); temppoint.measure_size = MAX_MEASURE_SIZE; for(int i = 0; i < MAX_MEASURE_SIZE; ++i) { FloatMeasurement tempmes; tempmes.pressure = (float)(rand() % 200)*10; tempmes.salinity = (float)(rand() % 360)/10; tempmes.temperature = (float)(rand() % 360)/10; temppoint.measure[i] = tempmes; } Position temppos; temppos.x = (float)(rand() % MAXX)/MAXX ; temppos.y = (float)(rand() % MAXY)/MAXY ; temppos.z = (float)(rand() % MAXZ)/MAXZ ; //////////////////// // add date ///// //////////////////// temppoint.FloatPos = temppos; tempfloattype.trajectory[j] = temppoint; } tempfloattype.id =k ; tempfloattype.floatState = DRIFT; AllFloats_host[k] = tempfloattype; // memcpy(cell_index_host[i+(j*MAXX)].id, tempindex, NUM_NEIGHBOR * sizeof(long)); //CA Diep change size floatcolorred[k] = (float)(rand()%100)/100; floatcolorblue[k] = (float)(rand()%100)/100; floatcolorgreen[k] = (float)(rand()%100)/100; } } void initSurface(){ surfacePos = (float4 *) malloc(sizeof(float4)*MAXX*MAXZ); for (int j=0; j<MAXZ; j++){ for (int i=0; i<MAXX;i++){ float x = (float) i/MAXX ; float z = (float) j/MAXZ ; surfacePos[j*MAXZ+i] = make_float4(x, 1.0f, z, 1.0f); } } // assert(surfaceVBO); // create buffer object /* GLuint points_vbo = 0; glGenBuffers(1, &points_vbo); glBindBuffer(GL_ARRAY_BUFFER, points_vbo); glBufferData(GL_ARRAY_BUFFER, 9 * sizeof(float), points, GL_STATIC_DRAW); */ /* glGenBuffers(1, VertexVBOID); glBindBuffer(GL_ARRAY_BUFFER, VertexVBOID); glBufferData(GL_ARRAY_BUFFER, sizeof(MyVertex)*3, &pvertex[0].x, GL_STATIC_DRAW); ushort pindices[3]; pindices[0] = 0; pindices[1] = 1; pindices[2] = 2; glGenBuffers(1, &IndexVBOID); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ushort)*3, pindices, GL_STATIC_DRAW); */ } void runCudaLoop(int valueControl){ //run 100 per second // cout << " value " << ":"<< valueControl; if (valueControl==0){ //do nothing }else{ runCuda(&cuda_vbo_resource,0,AllCells_device,cell_index_device); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; setenv ("DISPLAY", ":0", 0); sdkCreateTimer(&timer); int arraycellsize = MAXX*MAXY*MAXZ*sizeof(CellType); int arrayindex = MAXX*MAXY*MAXZ*sizeof(Index); int arrayfloatsize = num_floats*sizeof(FloatType); //Allocating memory of host variables AllCells_host = (CellType*) malloc(arraycellsize); cell_index_host = (Index*) malloc(arrayindex); AllFloats_host = (FloatType *) malloc(arrayfloatsize); //Allocating memory to device variable if(show3D){ initCell3D(CA_VON_NEUMANN); }else{ initCell2D(CA_VON_NEUMANN); } // 
if(showSurface){ initSurface(); } if(showFloat){ initFloat(); } // allocate_fluiddata (); // int arraycellsize = MAXX*MAXY*sizeof(CellType); checkCudaErrors(hipMalloc((CellType**)&AllCells_device,arraycellsize)); checkCudaErrors(hipMemcpy(AllCells_device, AllCells_host, arraycellsize, hipMemcpyHostToDevice)); // int arrayindex = MAXX*MAXY*sizeof(Index); checkCudaErrors(hipMalloc(( Index** ) &cell_index_device,arrayindex)); checkCudaErrors(hipMemcpy(cell_index_device, cell_index_host, arrayindex, hipMemcpyHostToDevice)); //cout<<" id = 551 [x,y]= [" << AllCells[551].CellPos.x<<","<<AllCells[551].CellPos.y<< "]"; //cout<< "\n neighbor: "; if (false == initGL(&argc, argv)) { return false; } if (checkCmdLineFlag(argc, (const char **)argv, "device")) { if (gpuGLDeviceInit(argc, (const char **)argv) == -1) { return false; } } else { hipGLSetGLDevice(gpuGetMaxGflopsDeviceId()); } glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); glutCloseFunc(cleanup); // glutTimerFunc(0,timerMeasure,0); createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard); glutMainLoop(); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource,int modeCA,CellType *Cells_device,Index *index_device) { /* get_force( dens_prev, u_prev, v_prev, w_prev ); vel_step ( M,N,O, u, v, w, u_prev, v_prev,w_prev, visc, dt ); dens_step ( M,N,O, dens, dens_prev, u, v, w, diff, dt ); */ float4 *dptr; checkCudaErrors(hipGraphicsMapResources(1, vbo_resource, 0)); size_t num_bytes; checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource)); dim3 block(8, 8, 8); dim3 grid(MAXX / block.x, MAXY / block.y, MAXZ/block.z); //game_of_life_kernel<<< grid, block>>>(dptr, MAXX,MAXY,MAXZ, modeCA,Cells_device,index_device,show3D); hipLaunchKernelGGL(( main_simulation), dim3(grid), dim3(block), 0, 0, dptr, MAXX,MAXY,MAXZ, modeCA,Cells_device,index_device); checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource, 0)); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); glGenBuffers(1, vbo); glBindBuffer(GL_ARRAY_BUFFER, *vbo); unsigned int size = MAXX * MAXY * MAXZ * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); SDK_CHECK_ERROR_GL(); } static void draw_velocity (){ int i, j, k; float x, y, z, h; h = 1.0f/MAX(MAX(M, N), MAX(N, O)); glColor3f ( 0.5f, 0.5f, 0.5f ); glLineWidth ( 1.0f ); glBegin ( GL_LINES ); for ( i=1; i<=M; i++ ) { x = (i-0.5f)*h; for ( j=1; j<=N; j++ ) { y = (j-0.5f)*h; for ( k=1; k<=O; k++ ) { z = (k-0.5f)*h; glVertex3f ( x, y, z ); glVertex3f ( x+u[IX(i,j,k)], y+v[IX(i,j,k)], z+w[IX(i,j,k)] ); } } } glEnd (); } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { // cout<<"\n average time = "<<sdkGetAverageTimerValue(&timer) / 1000.f ; /* if (sdkGetAverageTimerValue(&timer)>0.1) { sdkStopTimer(&timer);sdkStartTimer(&timer); return; } */ sdkStartTimer(&timer); glutTimerFunc(1000/g_fAnim, runCudaLoop, runControl); // run CUDA kernel to generate vertex positions // runCuda(&cuda_vbo_resource,0,AllCells_device,cell_index_device); // hipDeviceSynchronize(); // checkCudaErrors(hipMemcpy(AllCells_host,AllCells_device, arraycellsize, hipMemcpyDeviceToHost)); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.0, 0.0, translate_z); glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 1.0, 0.0); if(showCell) { glPointSize(3.0f); glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor4f(1.0, 0.0, 0.0,0.5f); glDrawArrays(GL_POINTS, 0, MAXX*MAXY*MAXZ); glDisableClientState(GL_VERTEX_ARRAY); } if(showSurface){ glGenBuffers(1, &surfaceVBO); glBindBuffer(GL_ARRAY_BUFFER, surfaceVBO); unsigned int size = MAXX * MAXZ * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, surfacePos, GL_STATIC_DRAW); // glBindBuffer(GL_ARRAY_BUFFER, surfaceVBO); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor4f(0.0, 0.0, 1.0f,1.0f); glDrawArrays(GL_POINTS, 0, MAXX*MAXZ); glDisableClientState(GL_VERTEX_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, 0); } if(showFloat){ GLuint float_vbo; float4 *floatPos; for(int k=0;k<num_floats;k++){ glGenBuffers(1, &float_vbo); glBindBuffer(GL_ARRAY_BUFFER, float_vbo); unsigned int trajecsize = AllFloats_host[k].trajectory_size * 4 * sizeof(float); floatPos = (float4*) malloc (trajecsize); for(int i =0; i<AllFloats_host[k].trajectory_size;i++){ floatPos[i] = make_float4(AllFloats_host[k].trajectory[i].FloatPos.x, AllFloats_host[k].trajectory[i].FloatPos.z, AllFloats_host[k].trajectory[i].FloatPos.y, 1.0f); } glBufferData(GL_ARRAY_BUFFER, trajecsize, floatPos, GL_STATIC_DRAW); // glBindBuffer(GL_ARRAY_BUFFER, surfaceVBO); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor4f(floatcolorred[k] , floatcolorgreen[k] , floatcolorblue[k] ,1.0f); glDrawArrays(GL_LINE_STRIP, 0, AllFloats_host[k].trajectory_size); // void glutWireSphere(GLdouble radius, GLint slices, GLint stacks); glDisableClientState(GL_VERTEX_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, 0); } } //show sample vertical z color red // float4 *verticalPos; // unsigned int versize = 256 * sizeof(float); //verticalPos = (float4*) malloc (versize); /* for(int k=0; k< 256; k++){ float z = (float)k/256; glPointSize(3.0f); glBegin (GL_POINTS); glVertex3f (0.5,z,0.5 ); glColor3f (z,0,0); glEnd(); } if(drawVec) draw_velocity (); */ // glColor3f(1.0,0.0,0.0); // glLoadIdentity(); // glutWireSphere( 0.05, 8, 4); // glFlush(); glutSwapBuffers(); g_fAnim += g_fAnimInc; if(animFlag) { // glutPostRedisplay(); } sdkStopTimer(&timer); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } //////////////////////////////////////////////////////////////////////////////// //! 
Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { checkCudaErrors(hipGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } void cleanup() { sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); } hipDeviceReset(); hipFree(AllFloats_device); hipFree(AllCells_device); hipFree(cell_index_device); free(AllFloats_host); free(AllCells_host); free(cell_index_host); free(floatPos); free(surfacePos); free(floatcolorred); free(floatcolorgreen); free(floatcolorblue); } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard(unsigned char key, int /*x*/, int /*y*/) { switch (key) { case (27) : glutDestroyWindow(glutGetWindow()); return; case 'a': // toggle animation case 'A': animFlag = (animFlag)?0:1; break; case '-': // decrease the time increment for the CUDA kernel g_fAnimInc -= 0.01; break; case '+': // increase the time increment for the CUDA kernel g_fAnimInc += 0.01; break; case 'r': // reset the time increment g_fAnimInc = 0.01; break; case 'f':showFloat = !showFloat; break; case 's': case 'S': runControl ++; runControl %=2; break; case 'z': showSurface = !showSurface; break; } glutPostRedisplay(); } //////////////////////////////////////////////////////////////////////////////// //! Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; } void motion(int x, int y) { float dx, dy; dx = (float)(x - mouse_old_x); dy = (float)(y - mouse_old_y); if (mouse_buttons & 1) { rotate_x += dy * 0.2f; rotate_y += dx * 0.2f; } else if (mouse_buttons & 4) { translate_z += dy * 0.01f; } mouse_old_x = x; mouse_old_y = y; } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda GL Float: %3.1f fps (Max 100Hz)", avgFPS); glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL(int *argc, char **argv) { glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Cuda GL Float"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutTimerFunc(REFRESH_DELAY, timerEvent,0); // initialize necessary OpenGL extensions glewInit(); if (! 
glewIsSupported("GL_VERSION_2_0 ")) { fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return false; } glClearColor(0.0, 0.0, 0.0, 1.0); // Enable depth test glEnable(GL_DEPTH_TEST); // Accept fragment if it closer to the camera than the former one glDepthFunc(GL_LESS); glEnable(GL_BLEND); //enable alpha color glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);//enable alpha color glViewport(0, 0, window_width, window_height); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); // float attenuation[] = {1.0f, -0.01f, -.000001f}; // glPointParameterfv(GL_POINT_DISTANCE_ATTENUATION, attenuation, 0); // glPointParameter(GL_POINT_DISTANCE_ATTENUATION,1.0f,-0.01f,-.000001f); // glEnable(GL_POINT_DISTANCE_ATTENTUATION); SDK_CHECK_ERROR_GL(); return true; }
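// A minimal sketch of the flat-index convention the file above relies on:
// the IX(i,j,k) macro computes i + (M+2)*(j + (N+2)*k), and the comment inside
// initCell3D() states the same rule as Flat[x + W*(y + D*z)]. The helper
// names flatten/unflatten and the parameters W, D are hypothetical; only the
// mapping itself is taken from the code above.
static inline int flatten(int x, int y, int z, int W, int D) {
  return x + W * (y + D * z);              // Original[x][y][z] -> Flat[index]
}
static inline void unflatten(int idx, int W, int D, int *x, int *y, int *z) {
  *x = idx % W;                            // fastest-varying coordinate
  *y = (idx / W) % D;                      // middle coordinate
  *z = idx / (W * D);                      // slowest-varying coordinate
}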
c38bc11e1757c2045a6632b6315a101518fcfc05.cu
///////////////////////////////// // NGUYEN Ba Diep 13/03/2017 // // [email protected] // // class simpleGL.cu // ///////////////////////////////// /* */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <iostream> // OpenGL Graphics includes #include <GL/glew.h> #include <GL/freeglut.h> #include <timer.h> // timing functions #include <cuda_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <helper_cuda_gl.h> // helper functions for CUDA/GL interop #include <vector_types.h> //#include "kernel.cu" #include "defines.h" #include <cassert> #define REFRESH_DELAY 200 //ms //200 :slow 10: very fast prefer:20 int MAXX=128; int MAXY=32; int MAXZ=128; #define SIZE 32 /* **************** global variable for fluid dinamic 2,147,483,648 ***************** #define DT 0.09f // Delta T for interative solver #define VIS 0.0025f // Viscosity constant //do nhot #define FORCE (5.8f*DIM) // Force scale factor #define FR 4 // Force update radius **************************************************************/ #define IX(i,j,k) ((i)+(M+2)*(j) + (M+2)*(N+2)*(k)) #define SWAP(x0,x) {float * tmp=x0;x0=x;x=tmp;} #define MAX(a,b) (((a) > (b)) ? (a) : (b)) #define LINEARSOLVERTIMES 10 bool drawVec = false; //fluid field information int M = SIZE; // grid x int N = SIZE; // grid y int O = SIZE; // grid z float dt = 0.4f; // time delta float diff = 0.0f; // diffuse float visc = 0.0f; // viscosity float force = 10.0f; // added on keypress on an axis float source = 200.0f; // density float source_alpha = 0.05; //for displaying density int addforce[3] = {0, 0, 0}; float * u, * v, *w, * u_prev, * v_prev, * w_prev; float * dens, * dens_prev; void add_source ( int M, int N, int O, float * x, float * s, float dt ) { int i, size=(M+2)*(N+2)*(O+2); for ( i=0 ; i<size ; i++ ) x[i] += dt*s[i]; } void lin_solve ( int M, int N, int O, int b, float * x, float * x0, float a, float c ) { int i, j, k, l; // iterate the solver for ( l=0 ; l<LINEARSOLVERTIMES ; l++ ) { // update for each cell for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { x[IX(i,j,k)] = (x0[IX(i,j,k)] + a*(x[IX(i-1,j,k)]+x[IX(i+1,j,k)]+x[IX(i,j-1,k)]+x[IX(i,j+1,k)]+x[IX(i,j,k-1)]+x[IX(i,j,k+1)]))/c; }}} } } void diffuse ( int M, int N, int O, int b, float * x, float * x0, float diff, float dt ) { int max = MAX(MAX(M, N), MAX(N, O)); float a=dt*diff*max*max*max; lin_solve ( M, N, O, b, x, x0, a, 1+6*a ); } void advect ( int M, int N, int O, int b, float * d, float * d0, float * u, float * v, float * w, float dt ) { int i, j, k, i0, j0, k0, i1, j1, k1; float x, y, z, s0, t0, s1, t1, u1, u0, dtx,dty,dtz; dtx=dty=dtz=dt*MAX(MAX(M, N), MAX(N, O)); for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { x = i-dtx*u[IX(i,j,k)]; y = j-dty*v[IX(i,j,k)]; z = k-dtz*w[IX(i,j,k)]; if (x<0.5f) x=0.5f; if (x>M+0.5f) x=M+0.5f; i0=(int)x; i1=i0+1; if (y<0.5f) y=0.5f; if (y>N+0.5f) y=N+0.5f; j0=(int)y; j1=j0+1; if (z<0.5f) z=0.5f; if (z>O+0.5f) z=O+0.5f; k0=(int)z; k1=k0+1; s1 = x-i0; s0 = 1-s1; t1 = y-j0; t0 = 1-t1; u1 = z-k0; u0 = 1-u1; d[IX(i,j,k)] = s0*(t0*u0*d0[IX(i0,j0,k0)]+t1*u0*d0[IX(i0,j1,k0)]+t0*u1*d0[IX(i0,j0,k1)]+t1*u1*d0[IX(i0,j1,k1)])+ s1*(t0*u0*d0[IX(i1,j0,k0)]+t1*u0*d0[IX(i1,j1,k0)]+t0*u1*d0[IX(i1,j0,k1)]+t1*u1*d0[IX(i1,j1,k1)]); }}} } void project ( int M, int N, int O, 
float * u, float * v, float * w, float * p, float * div ) { int i, j, k; for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { div[IX(i,j,k)] = -1.0/3.0*((u[IX(i+1,j,k)]-u[IX(i-1,j,k)])/M+(v[IX(i,j+1,k)]-v[IX(i,j-1,k)])/M+(w[IX(i,j,k+1)]-w[IX(i,j,k-1)])/M); p[IX(i,j,k)] = 0; }}} lin_solve ( M, N, O, 0, p, div, 1, 6 ); for ( i=1 ; i<=M ; i++ ) { for ( j=1 ; j<=N ; j++ ) { for ( k=1 ; k<=O ; k++ ) { u[IX(i,j,k)] -= 0.5f*M*(p[IX(i+1,j,k)]-p[IX(i-1,j,k)]); v[IX(i,j,k)] -= 0.5f*M*(p[IX(i,j+1,k)]-p[IX(i,j-1,k)]); w[IX(i,j,k)] -= 0.5f*M*(p[IX(i,j,k+1)]-p[IX(i,j,k-1)]); }}} } void dens_step ( int M, int N, int O, float * x, float * x0, float * u, float * v, float * w, float diff, float dt ) { add_source ( M, N, O, x, x0, dt ); SWAP ( x0, x ); diffuse ( M, N, O, 0, x, x0, diff, dt ); SWAP ( x0, x ); advect ( M, N, O, 0, x, x0, u, v, w, dt ); } void vel_step ( int M, int N, int O, float * u, float * v, float * w, float * u0, float * v0, float * w0, float visc, float dt ) { add_source ( M, N, O, u, u0, dt ); add_source ( M, N, O, v, v0, dt );add_source ( M, N, O, w, w0, dt ); SWAP ( u0, u ); diffuse ( M, N, O, 1, u, u0, visc, dt ); SWAP ( v0, v ); diffuse ( M, N, O, 2, v, v0, visc, dt ); SWAP ( w0, w ); diffuse ( M, N, O, 3, w, w0, visc, dt ); project ( M, N, O, u, v, w, u0, v0 ); SWAP ( u0, u ); SWAP ( v0, v );SWAP ( w0, w ); advect ( M, N, O, 1, u, u0, u0, v0, w0, dt ); advect ( M, N, O, 2, v, v0, u0, v0, w0, dt );advect ( M, N, O, 3, w, w0, u0, v0, w0, dt ); project ( M, N, O, u, v, w, u0, v0 ); } static int allocate_fluiddata (){ int size = (M+2)*(N+2)*(O+2); u = (float *) malloc ( size*sizeof(float) ); v = (float *) malloc ( size*sizeof(float) ); w = (float *) malloc ( size*sizeof(float) ); u_prev = (float *) malloc ( size*sizeof(float) ); v_prev = (float *) malloc ( size*sizeof(float) ); w_prev = (float *) malloc ( size*sizeof(float) ); dens = (float *) malloc ( size*sizeof(float) ); dens_prev = (float *) malloc ( size*sizeof(float) ); if ( !u || !v || !w || !u_prev || !v_prev || !w_prev || !dens || !dens_prev ) { fprintf ( stderr, "cannot allocate data\n" ); return ( 0 ); } return ( 1 ); } static void get_force( float * d, float * u, float * v, float * w ){ int i, j, k, size=(M+2)*(N+2)*(O+2);; for ( i=0 ; i<size ; i++ ) { u[i] = v[i] = w[i]= d[i] = 0.0f; } if(addforce[0]==1) // x { i=2, j=N/2; k=O/2; if ( i<1 || i>M || j<1 || j>N || k <1 || k>O) return; u[IX(i,j,k)] = force*10; addforce[0] = 0; } if(addforce[1]==1) { i=M/2, j=2; k=O/2; if ( i<1 || i>M || j<1 || j>N || k <1 || k>O) return; v[IX(i,j,k)] = force*10; addforce[1] = 0; } if(addforce[2]==1) // y { i=M/2, j=N/2; k=2; if ( i<1 || i>M || j<1 || j>N || k <1 || k>O) return; w[IX(i,j,k)] = force*10; addforce[2] = 0; } return; } bool show3D = true ; float g_fAnim = 0.0; float g_fAnimInc = 0.01f; bool animFlag = true; int runControl = 1; // Auto-Verification Code int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling float avgFPS = 0.0f; unsigned int frameCount = 0; GLint gFramesPerSecond = 0; static GLint Frames = 0; // frames averaged over 1000mS static GLuint Clock; // [milliSeconds] static GLuint PreviousClock = 0; // [milliSeconds] static GLuint NextClock = 0; // [milliSeconds] void FPS(void) { ++Frames; Clock = glutGet(GLUT_ELAPSED_TIME); //has limited resolution, so average over 1000mS if ( Clock < NextClock ) return; gFramesPerSecond = Frames/1; // store the averaged number of frames per second PreviousClock = Clock; NextClock = Clock+1000; // 1000mS=1S in the future 
Frames=0; } void timerMeasure(int value) { const int desiredFPS=100; glutTimerFunc(1000/desiredFPS, timerMeasure, ++value); //put your specific idle code here //... this code will run at desiredFPS // printf("numframe:%d ",Frames); //end your specific idle code here FPS(); //only call once per frame loop to measure FPS glutPostRedisplay(); } // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? a : b) // vbo variables GLuint vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; GLuint float_vbo; float4 *floatPos; /* struct cudaGraphicsResource *float_vbo_cuda_resource; void *d_float_vbo_buffer = NULL; */ ////////////////// bien toan cuc luu tru thong tin // /**************************************** ************ Cell & Float **************** *************************************/ bool showFloat = true; int num_floats = 4; FloatType *AllFloats_host = NULL; FloatType *AllFloats_device; bool showCell = true; CellType *AllCells_host = NULL; CellType *AllCells_device; Index *cell_index_host = NULL; Index *cell_index_device; float4 *surfacePos; GLuint surfaceVBO; bool showSurface = true; float *floatcolorred; float *floatcolorgreen; float *floatcolorblue; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void cleanup(); // GL functionality bool initGL(int *argc, char **argv); void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res); extern __global__ void game_of_life_kernel(float4 *pos, unsigned int maxx,unsigned int maxy, unsigned int maxz, int CAMode, CellType *Cells_device,Index * index_device,bool showMode); extern __global__ void main_simulation(float4 *pos, unsigned int maxx,unsigned int maxy, unsigned int maxz,int CAMode, CellType *Cells_device,Index * index_device); // rendering callbacks void display(); void keyboard(unsigned char key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); void timerEvent(int value); void computeFPS(); // Cuda functionality void runCuda(struct cudaGraphicsResource **vbo_resource,int modeCA,CellType *cells_d,Index * index_device); void initCell2D(int CAMode){ long tempid = 0; int num_inactive = 0; for(int j = 0; j < MAXY; ++j){ for(int i = 0; i < MAXX; ++i) { Position temp; temp.x = (float)i/MAXX ; temp.y = (float)j/MAXY ; temp.z = 0.5f; unsigned long long int index = i+MAXY*j; AllCells_host[index].id = tempid; AllCells_host[index].CellPos = temp; int state = rand() % 100 ; // cout << " state = " <<state; if (state %4 ==0) { //Diep random init AllCells_host[index].state = NORMAL ; // cout << " \n NORMAL id = " <<tempid; }else { AllCells_host[index].state = INACTIVE ; // cout << " \n INACTIVE id = " <<tempid; num_inactive ++; } tempid++; } } // cout << " tempid = " <<tempid; if(CAMode==CA_VON_NEUMANN){ //4 neighbor 2D vector<long> neighbor ; for(int j = 0; j < MAXY; ++j){ for(int i = 0; i < MAXX; ++i) { unsigned long long int index = i+MAXY*j; long tempindex[NUM_NEIGHBOR]; if (i>0){//left(x) = (x - 1) % M tempindex[0] = AllCells_host[index-1].id ; }else { tempindex[0] = INVALID_ID ; } if (i<MAXX-1){//right(x) = (x + 1) % M tempindex[1] = AllCells_host[index+1].id ; }else{ tempindex[1] = INVALID_ID ; } if (j>0){//above(x) = (x - M) % (M * N) tempindex[2] = 
AllCells_host[index-MAXX].id ; }else { tempindex[2] = INVALID_ID ; } if (j<MAXY-1){//below(x) = (x + M) % (M * N) tempindex[3] = AllCells_host[index+MAXX].id ; }else { tempindex[3] = INVALID_ID ; } memcpy(cell_index_host[index].id, tempindex, NUM_NEIGHBOR * sizeof(long)); //CA Diep change size // cell_index_host[i+(j*MAXX)].id = tempindex; /* if(i==2&&j==0){ cout << "\n i+j*MAXX= " << i+j*MAXX << " AllCells id= " <<AllCells[(i+j*MAXX)].id << " neightbors: " << AllCells[((i+j*MAXX)-1)%MAXX].id <<","<< AllCells[((i+j*MAXX)+1)%MAXX].id <<"," << AllCells[((i+j*MAXX)-MAXX)%(MAXX*MAXY)].id <<","<< AllCells[((i+j*MAXX)+MAXX)%(MAXX*MAXY)].id; }*/ } } } // printf("\n done initCell maxid = %d , inactive=%d ",tempid,num_inactive); } void initCell3D(int CAMode){ long tempid = 0; int num_inactive = 0; for(int k=0;k<MAXZ;k++){ for(int j = 0; j < MAXY; j++){ for(int i = 0; i < MAXX; i++) { unsigned long long int index = i+MAXZ*(j+MAXY*k); Position temp; temp.x = (float)i/MAXX ; temp.y = (float)j/MAXY ; temp.z = (float)k/MAXZ ; AllCells_host[index].id = tempid; AllCells_host[index].CellPos = temp; AllCells_host[index].temperature = (rand() % 1000 )/100 ; int state = rand() % 200 ; // cout << " \ni+MAXZ*(j+MAXY*k)= " <<i+MAXZ*(j+MAXY*k) << " tempid="<<tempid; if (state %13 ==0) { //Diep random init AllCells_host[index].state = NORMAL ; // cout << " \n NORMAL id = " <<tempid; }else { AllCells_host[index].state = INACTIVE ; // cout << " \n INACTIVE id = " <<tempid; num_inactive ++; } tempid++; //Flat[x + HEIGHT* (y + WIDTH* z)] // The algorithm is mostly the same. If you have a 3D array Original[HEIGHT, WIDTH, DEPTH] then you could turn it into Flat[HEIGHT * WIDTH * DEPTH] by // Flat[x + WIDTH * (y + DEPTH * z)] = Original[x, y, z] }//end for i MAXX }//end for j MAXY }//end for k MAXZ // cout << " tempid = " <<tempid; if(CAMode==CA_VON_NEUMANN){ //6 neighbor 3D for (int k=0; k<MAXZ;k++){ for(int j = 0; j < MAXY; j++){ for(int i = 0; i < MAXX; i++){ long tempindex[NUM_NEIGHBOR]; if (i>0){//left(x) = (x - 1) % M tempindex[0] = AllCells_host[((i+MAXZ*(j+MAXY*k))-1)].id ; }else { tempindex[0] = INVALID_ID ; } if (i<MAXX-1){//right(x) = (x + 1) % M tempindex[1] = AllCells_host[((i+MAXZ*(j+MAXY*k))+1)].id ; }else{ tempindex[1] = INVALID_ID ; } if (j>0){//above(x) = (x - M) % (M * N) tempindex[2] = AllCells_host[((i+MAXZ*(j-1+MAXY*k)))].id ; }else { tempindex[2] = INVALID_ID ; } if (j<MAXY-1){//below(x) = (x + M) % (M * N) tempindex[3] = AllCells_host[((i+MAXZ*(j+1+MAXY*k)))].id ; }else { tempindex[3] = INVALID_ID ; } if (k>0){//behind (x) = (x - M) % (M * N) tempindex[4] = AllCells_host[(i+MAXZ*(j+MAXY*(k-1)))].id ; }else { tempindex[4] = INVALID_ID ; } if (k<MAXZ-1){//front (x) = (x + M) % (M * N) tempindex[5] = AllCells_host[(i+MAXZ*(j+MAXY*(k+1)))].id ; }else { tempindex[5] = INVALID_ID ; } memcpy(cell_index_host[i+MAXZ*(j+MAXY*k)].id, tempindex, NUM_NEIGHBOR * sizeof(long)); //CA Diep change size // cell_index_host[i+(j*MAXX)].id = tempindex; // if(i==0&&j==1&&k==1){ /* cout <<"\n "<<k<<j<<i <<"|i+MAXZ*(j+MAXY*k)= " << i+MAXZ*(j+MAXY*k) << " AllCells id= " <<AllCells_host[i+MAXZ*(j+MAXY*k)].id << " \n neightbors: "; for (int de=0;de<NUM_NEIGHBOR;de++){ cout << de << ":"<< tempindex[de]<< " |" ; }*/ // } /* if(i==1&&j==1&&k==1){ cout << "\ni+MAXZ*(j+MAXY*k)= " << i+i+MAXZ*(j+MAXY*k) << " AllCells id= " <<AllCells_host[i+MAXZ*(j+MAXY*k)].id << " \n neightbors: "; for (int de=0;de<NUM_NEIGHBOR;de++){ cout << de << ":"<< tempindex[de]<< " |" ; } } */ }//end for i MAXX }//end for j MAXY }//end for k MAXZ }//end if 
CA Mode // printf("\n done initCell maxid = %d , inactive=%d ",tempid,num_inactive); } void initFloat(){ floatcolorred = (float *)malloc(num_floats*sizeof(float)); floatcolorgreen =(float *)malloc(num_floats*sizeof(float)); floatcolorblue = (float *)malloc(num_floats*sizeof(float)); for (int k=0;k<num_floats; k++){ FloatType tempfloattype; tempfloattype.trajectory = (FloatTrajectoryPoint*)malloc (MAX_TRAJECTORY_SIZE *sizeof(FloatTrajectoryPoint)); tempfloattype.trajectory_size = MAX_TRAJECTORY_SIZE ; for(int j = 0; j < MAX_TRAJECTORY_SIZE; ++j){ FloatTrajectoryPoint temppoint; temppoint.measure = (FloatMeasurement *) malloc (MAX_MEASURE_SIZE*sizeof(FloatMeasurement)); temppoint.measure_size = MAX_MEASURE_SIZE; for(int i = 0; i < MAX_MEASURE_SIZE; ++i) { FloatMeasurement tempmes; tempmes.pressure = (float)(rand() % 200)*10; tempmes.salinity = (float)(rand() % 360)/10; tempmes.temperature = (float)(rand() % 360)/10; temppoint.measure[i] = tempmes; } Position temppos; temppos.x = (float)(rand() % MAXX)/MAXX ; temppos.y = (float)(rand() % MAXY)/MAXY ; temppos.z = (float)(rand() % MAXZ)/MAXZ ; //////////////////// // add date ///// //////////////////// temppoint.FloatPos = temppos; tempfloattype.trajectory[j] = temppoint; } tempfloattype.id =k ; tempfloattype.floatState = DRIFT; AllFloats_host[k] = tempfloattype; // memcpy(cell_index_host[i+(j*MAXX)].id, tempindex, NUM_NEIGHBOR * sizeof(long)); //CA Diep change size floatcolorred[k] = (float)(rand()%100)/100; floatcolorblue[k] = (float)(rand()%100)/100; floatcolorgreen[k] = (float)(rand()%100)/100; } } void initSurface(){ surfacePos = (float4 *) malloc(sizeof(float4)*MAXX*MAXZ); for (int j=0; j<MAXZ; j++){ for (int i=0; i<MAXX;i++){ float x = (float) i/MAXX ; float z = (float) j/MAXZ ; surfacePos[j*MAXZ+i] = make_float4(x, 1.0f, z, 1.0f); } } // assert(surfaceVBO); // create buffer object /* GLuint points_vbo = 0; glGenBuffers(1, &points_vbo); glBindBuffer(GL_ARRAY_BUFFER, points_vbo); glBufferData(GL_ARRAY_BUFFER, 9 * sizeof(float), points, GL_STATIC_DRAW); */ /* glGenBuffers(1, VertexVBOID); glBindBuffer(GL_ARRAY_BUFFER, VertexVBOID); glBufferData(GL_ARRAY_BUFFER, sizeof(MyVertex)*3, &pvertex[0].x, GL_STATIC_DRAW); ushort pindices[3]; pindices[0] = 0; pindices[1] = 1; pindices[2] = 2; glGenBuffers(1, &IndexVBOID); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ushort)*3, pindices, GL_STATIC_DRAW); */ } void runCudaLoop(int valueControl){ //run 100 per second // cout << " value " << ":"<< valueControl; if (valueControl==0){ //do nothing }else{ runCuda(&cuda_vbo_resource,0,AllCells_device,cell_index_device); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; setenv ("DISPLAY", ":0", 0); sdkCreateTimer(&timer); int arraycellsize = MAXX*MAXY*MAXZ*sizeof(CellType); int arrayindex = MAXX*MAXY*MAXZ*sizeof(Index); int arrayfloatsize = num_floats*sizeof(FloatType); //Allocating memory of host variables AllCells_host = (CellType*) malloc(arraycellsize); cell_index_host = (Index*) malloc(arrayindex); AllFloats_host = (FloatType *) malloc(arrayfloatsize); //Allocating memory to device variable if(show3D){ initCell3D(CA_VON_NEUMANN); }else{ initCell2D(CA_VON_NEUMANN); } // if(showSurface){ initSurface(); } if(showFloat){ initFloat(); } // allocate_fluiddata (); // int arraycellsize = MAXX*MAXY*sizeof(CellType); 
checkCudaErrors(cudaMalloc((CellType**)&AllCells_device,arraycellsize)); checkCudaErrors(cudaMemcpy(AllCells_device, AllCells_host, arraycellsize, cudaMemcpyHostToDevice)); // int arrayindex = MAXX*MAXY*sizeof(Index); checkCudaErrors(cudaMalloc(( Index** ) &cell_index_device,arrayindex)); checkCudaErrors(cudaMemcpy(cell_index_device, cell_index_host, arrayindex, cudaMemcpyHostToDevice)); //cout<<" id = 551 [x,y]= [" << AllCells[551].CellPos.x<<","<<AllCells[551].CellPos.y<< "]"; //cout<< "\n neighbor: "; if (false == initGL(&argc, argv)) { return false; } if (checkCmdLineFlag(argc, (const char **)argv, "device")) { if (gpuGLDeviceInit(argc, (const char **)argv) == -1) { return false; } } else { cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId()); } glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); glutCloseFunc(cleanup); // glutTimerFunc(0,timerMeasure,0); createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard); glutMainLoop(); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource,int modeCA,CellType *Cells_device,Index *index_device) { /* get_force( dens_prev, u_prev, v_prev, w_prev ); vel_step ( M,N,O, u, v, w, u_prev, v_prev,w_prev, visc, dt ); dens_step ( M,N,O, dens, dens_prev, u, v, w, diff, dt ); */ float4 *dptr; checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource, 0)); size_t num_bytes; checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource)); dim3 block(8, 8, 8); dim3 grid(MAXX / block.x, MAXY / block.y, MAXZ/block.z); //game_of_life_kernel<<< grid, block>>>(dptr, MAXX,MAXY,MAXZ, modeCA,Cells_device,index_device,show3D); main_simulation<<< grid, block>>>(dptr, MAXX,MAXY,MAXZ, modeCA,Cells_device,index_device); checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource, 0)); } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); glGenBuffers(1, vbo); glBindBuffer(GL_ARRAY_BUFFER, *vbo); unsigned int size = MAXX * MAXY * MAXZ * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); SDK_CHECK_ERROR_GL(); } static void draw_velocity (){ int i, j, k; float x, y, z, h; h = 1.0f/MAX(MAX(M, N), MAX(N, O)); glColor3f ( 0.5f, 0.5f, 0.5f ); glLineWidth ( 1.0f ); glBegin ( GL_LINES ); for ( i=1; i<=M; i++ ) { x = (i-0.5f)*h; for ( j=1; j<=N; j++ ) { y = (j-0.5f)*h; for ( k=1; k<=O; k++ ) { z = (k-0.5f)*h; glVertex3f ( x, y, z ); glVertex3f ( x+u[IX(i,j,k)], y+v[IX(i,j,k)], z+w[IX(i,j,k)] ); } } } glEnd (); } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { // cout<<"\n average time = "<<sdkGetAverageTimerValue(&timer) / 1000.f ; /* if (sdkGetAverageTimerValue(&timer)>0.1) { sdkStopTimer(&timer);sdkStartTimer(&timer); return; } */ sdkStartTimer(&timer); glutTimerFunc(1000/g_fAnim, runCudaLoop, runControl); // run CUDA kernel to generate vertex positions // runCuda(&cuda_vbo_resource,0,AllCells_device,cell_index_device); // cudaDeviceSynchronize(); // checkCudaErrors(cudaMemcpy(AllCells_host,AllCells_device, arraycellsize, cudaMemcpyDeviceToHost)); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.0, 0.0, translate_z); glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 1.0, 0.0); if(showCell) { glPointSize(3.0f); glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor4f(1.0, 0.0, 0.0,0.5f); glDrawArrays(GL_POINTS, 0, MAXX*MAXY*MAXZ); glDisableClientState(GL_VERTEX_ARRAY); } if(showSurface){ glGenBuffers(1, &surfaceVBO); glBindBuffer(GL_ARRAY_BUFFER, surfaceVBO); unsigned int size = MAXX * MAXZ * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, surfacePos, GL_STATIC_DRAW); // glBindBuffer(GL_ARRAY_BUFFER, surfaceVBO); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor4f(0.0, 0.0, 1.0f,1.0f); glDrawArrays(GL_POINTS, 0, MAXX*MAXZ); glDisableClientState(GL_VERTEX_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, 0); } if(showFloat){ GLuint float_vbo; float4 *floatPos; for(int k=0;k<num_floats;k++){ glGenBuffers(1, &float_vbo); glBindBuffer(GL_ARRAY_BUFFER, float_vbo); unsigned int trajecsize = AllFloats_host[k].trajectory_size * 4 * sizeof(float); floatPos = (float4*) malloc (trajecsize); for(int i =0; i<AllFloats_host[k].trajectory_size;i++){ floatPos[i] = make_float4(AllFloats_host[k].trajectory[i].FloatPos.x, AllFloats_host[k].trajectory[i].FloatPos.z, AllFloats_host[k].trajectory[i].FloatPos.y, 1.0f); } glBufferData(GL_ARRAY_BUFFER, trajecsize, floatPos, GL_STATIC_DRAW); // glBindBuffer(GL_ARRAY_BUFFER, surfaceVBO); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor4f(floatcolorred[k] , floatcolorgreen[k] , floatcolorblue[k] ,1.0f); glDrawArrays(GL_LINE_STRIP, 0, AllFloats_host[k].trajectory_size); // void glutWireSphere(GLdouble radius, GLint slices, GLint stacks); glDisableClientState(GL_VERTEX_ARRAY); glBindBuffer(GL_ARRAY_BUFFER, 0); } } //show sample vertical z color red // float4 *verticalPos; // unsigned int versize = 256 * sizeof(float); //verticalPos = (float4*) malloc (versize); /* for(int k=0; k< 256; k++){ float z = (float)k/256; glPointSize(3.0f); glBegin (GL_POINTS); glVertex3f (0.5,z,0.5 ); glColor3f (z,0,0); glEnd(); } if(drawVec) draw_velocity (); */ // glColor3f(1.0,0.0,0.0); // glLoadIdentity(); // glutWireSphere( 0.05, 8, 4); // glFlush(); glutSwapBuffers(); g_fAnim += g_fAnimInc; if(animFlag) { // glutPostRedisplay(); } sdkStopTimer(&timer); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } //////////////////////////////////////////////////////////////////////////////// //! 
Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { checkCudaErrors(cudaGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } void cleanup() { sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); } cudaDeviceReset(); cudaFree(AllFloats_device); cudaFree(AllCells_device); cudaFree(cell_index_device); free(AllFloats_host); free(AllCells_host); free(cell_index_host); free(floatPos); free(surfacePos); free(floatcolorred); free(floatcolorgreen); free(floatcolorblue); } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard(unsigned char key, int /*x*/, int /*y*/) { switch (key) { case (27) : glutDestroyWindow(glutGetWindow()); return; case 'a': // toggle animation case 'A': animFlag = (animFlag)?0:1; break; case '-': // decrease the time increment for the CUDA kernel g_fAnimInc -= 0.01; break; case '+': // increase the time increment for the CUDA kernel g_fAnimInc += 0.01; break; case 'r': // reset the time increment g_fAnimInc = 0.01; break; case 'f':showFloat = !showFloat; break; case 's': case 'S': runControl ++; runControl %=2; break; case 'z': showSurface = !showSurface; break; } glutPostRedisplay(); } //////////////////////////////////////////////////////////////////////////////// //! Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; } void motion(int x, int y) { float dx, dy; dx = (float)(x - mouse_old_x); dy = (float)(y - mouse_old_y); if (mouse_buttons & 1) { rotate_x += dy * 0.2f; rotate_y += dx * 0.2f; } else if (mouse_buttons & 4) { translate_z += dy * 0.01f; } mouse_old_x = x; mouse_old_y = y; } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda GL Float: %3.1f fps (Max 100Hz)", avgFPS); glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL(int *argc, char **argv) { glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Cuda GL Float"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutTimerFunc(REFRESH_DELAY, timerEvent,0); // initialize necessary OpenGL extensions glewInit(); if (! 
glewIsSupported("GL_VERSION_2_0 ")) { fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return false; } glClearColor(0.0, 0.0, 0.0, 1.0); // Enable depth test glEnable(GL_DEPTH_TEST); // Accept fragment if it closer to the camera than the former one glDepthFunc(GL_LESS); glEnable(GL_BLEND); //enable alpha color glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);//enable alpha color glViewport(0, 0, window_width, window_height); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); // float attenuation[] = {1.0f, -0.01f, -.000001f}; // glPointParameterfv(GL_POINT_DISTANCE_ATTENUATION, attenuation, 0); // glPointParameter(GL_POINT_DISTANCE_ATTENUATION,1.0f,-0.01f,-.000001f); // glEnable(GL_POINT_DISTANCE_ATTENTUATION); SDK_CHECK_ERROR_GL(); return true; }
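// A minimal sketch of the CUDA/OpenGL interop sequence that createVBO(),
// runCuda() and deleteVBO() above follow: register the GL buffer once, then
// map it, fetch a device pointer, launch, and unmap on every frame. The
// kernel fill_vbo and the helper step() are hypothetical placeholders; only
// the call order is taken from the code above.
// (Requires <cuda_runtime.h> and <cuda_gl_interop.h>, both already included above.)
__global__ void fill_vbo(float4 *pos) { /* write vertex data into the VBO */ }

void step(struct cudaGraphicsResource *res, dim3 grid, dim3 block) {
  float4 *dptr = NULL;
  size_t nbytes = 0;
  cudaGraphicsMapResources(1, &res, 0);                 // GL must not use the VBO while mapped
  cudaGraphicsResourceGetMappedPointer((void **)&dptr, &nbytes, res);
  fill_vbo<<<grid, block>>>(dptr);                      // kernel writes straight into the buffer
  cudaGraphicsUnmapResources(1, &res, 0);               // hand the buffer back to GL for drawing
}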
1471d323ff4d98ae51163b8b46a15e4ab2bad0be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #define BDIM 1024 __global__ void fast_transpose(double *a, double *b, int N) { // buffer __shared__ double buffer[BDIM]; int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; // doing the transposition on the shared memory buffer[threadIdx.y * blockDim.x + threadIdx.x] = a[y * N + x]; __syncthreads(); // copy back on global memory b[x * N + y] = buffer[threadIdx.y * blockDim.x + threadIdx.x]; } // naive transpose __global__ void transpose(double *a, double *b, int N) { int row = (blockIdx.x * blockDim.x + threadIdx.x) / N; int col = (blockIdx.x * blockDim.x + threadIdx.x) % N; b[col * N + row] = a[row * N + col]; } // just randomlly fill the matrix void random_fill(double *mat, int N) { for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) mat[i * N + j] = (double)rand() / (double)RAND_MAX * 100.; } // Used for error-checking void transpose_cpu(double *a, double *b, int N) { for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) b[j * N + i] = a[i * N + j]; } // check if two matrix are equals int is_equal(double *a, double *b, int N) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) if (b[i * N + j] != a[i * N + j]) return 0; } return 1; } void print_mat(double *a, int N) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%.1f ", a[i * N + j]); } printf("\n"); } printf("\n"); } int main(int argc, char *argv[]) { double *a, *b, *c, *d; // host copies of a, b, c const int N = 8192; double *dev_a, *dev_b, *dev_c; // device copies of a, b, c int size = N * N * sizeof(double); // we need space for 512 // Get the number of block dimensions (dim1*dim2 = number of threads) if (argc < 3) { printf("Insert the dimensions, first x, second y\n"); return -1; } // get block dimensions from command line const int dim1 = atoi(argv[1]); const int dim2 = atoi(argv[2]); const int Nblocks = (N * N) / 1024; if (dim1 * dim2 != BDIM) { printf("Give rigth dimensions\n"); return -2; } dim3 grid, block; block.x = dim1; block.y = dim2; grid.x = N / block.x; grid.y = N / block.y; // allocate device copies of a, b, c hipMalloc((void **)&dev_a, size); hipMalloc((void **)&dev_b, size); a = (double *)malloc(size); b = (double *)malloc(size); d = (double *)malloc(size); // fill the matrix with random numbers random_fill(a, N); hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice); // cuda event for timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( fast_transpose), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, N); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost); // print_mat(b,N); transpose_cpu(a, d, N); int equal = is_equal(b, d, N); if (equal) printf("Correct fast\n"); else printf("Uncorrect fast\n"); // Bandwith for reading from matrix a + writing on matrix b printf("Time fast= %f\n", milliseconds); printf("Bandwidth fast= %f\n", N * N * 2 * 8 / milliseconds / 1e6); free(b); hipFree(dev_b); c = (double *)malloc(size); hipMalloc((void **)&dev_c, size); hipEventRecord(start); hipLaunchKernelGGL(( transpose), dim3(Nblocks), dim3(1024), 0, 0, dev_a, dev_c, N); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost); equal = is_equal(c, d, N); if (equal) 
printf("Correct naive\n"); else printf("Uncorrect naive\n"); printf("Time naive = %f\n", milliseconds); printf("Bandwidth naive= %f\n", N * N * 2 * 8 / milliseconds / 1e6); free(a); free(c); free(d); hipFree(dev_a); hipFree(dev_c); return 0; }
1471d323ff4d98ae51163b8b46a15e4ab2bad0be.cu
#include <math.h> #include <stdio.h> #define BDIM 1024 __global__ void fast_transpose(double *a, double *b, int N) { // buffer __shared__ double buffer[BDIM]; int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; // doing the transposition on the shared memory buffer[threadIdx.y * blockDim.x + threadIdx.x] = a[y * N + x]; __syncthreads(); // copy back on global memory b[x * N + y] = buffer[threadIdx.y * blockDim.x + threadIdx.x]; } // naive transpose __global__ void transpose(double *a, double *b, int N) { int row = (blockIdx.x * blockDim.x + threadIdx.x) / N; int col = (blockIdx.x * blockDim.x + threadIdx.x) % N; b[col * N + row] = a[row * N + col]; } // just randomlly fill the matrix void random_fill(double *mat, int N) { for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) mat[i * N + j] = (double)rand() / (double)RAND_MAX * 100.; } // Used for error-checking void transpose_cpu(double *a, double *b, int N) { for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) b[j * N + i] = a[i * N + j]; } // check if two matrix are equals int is_equal(double *a, double *b, int N) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) if (b[i * N + j] != a[i * N + j]) return 0; } return 1; } void print_mat(double *a, int N) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%.1f ", a[i * N + j]); } printf("\n"); } printf("\n"); } int main(int argc, char *argv[]) { double *a, *b, *c, *d; // host copies of a, b, c const int N = 8192; double *dev_a, *dev_b, *dev_c; // device copies of a, b, c int size = N * N * sizeof(double); // we need space for 512 // Get the number of block dimensions (dim1*dim2 = number of threads) if (argc < 3) { printf("Insert the dimensions, first x, second y\n"); return -1; } // get block dimensions from command line const int dim1 = atoi(argv[1]); const int dim2 = atoi(argv[2]); const int Nblocks = (N * N) / 1024; if (dim1 * dim2 != BDIM) { printf("Give rigth dimensions\n"); return -2; } dim3 grid, block; block.x = dim1; block.y = dim2; grid.x = N / block.x; grid.y = N / block.y; // allocate device copies of a, b, c cudaMalloc((void **)&dev_a, size); cudaMalloc((void **)&dev_b, size); a = (double *)malloc(size); b = (double *)malloc(size); d = (double *)malloc(size); // fill the matrix with random numbers random_fill(a, N); cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); // cuda event for timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); fast_transpose<<<grid, block>>>(dev_a, dev_b, N); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost); // print_mat(b,N); transpose_cpu(a, d, N); int equal = is_equal(b, d, N); if (equal) printf("Correct fast\n"); else printf("Uncorrect fast\n"); // Bandwith for reading from matrix a + writing on matrix b printf("Time fast= %f\n", milliseconds); printf("Bandwidth fast= %f\n", N * N * 2 * 8 / milliseconds / 1e6); free(b); cudaFree(dev_b); c = (double *)malloc(size); cudaMalloc((void **)&dev_c, size); cudaEventRecord(start); transpose<<<Nblocks, 1024>>>(dev_a, dev_c, N); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); equal = is_equal(c, d, N); if (equal) printf("Correct naive\n"); else printf("Uncorrect naive\n"); printf("Time naive = %f\n", milliseconds); printf("Bandwidth naive= %f\n", N * N * 
2 * 8 / milliseconds / 1e6); free(a); free(c); free(d); cudaFree(dev_a); cudaFree(dev_c); return 0; }
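// Editor's note, not part of the dataset entry above: fast_transpose in this entry
// stages a[y * N + x] in shared memory, but each thread writes back the very element
// it loaded, so the store b[x * N + y] stays strided just like the naive kernel's.
// A common bank-conflict-free variant is sketched below; it assumes a square
// TILE x TILE block (neither TILE nor this launch shape is taken from the entry).
#define TILE 32

// Launch with dim3 block(TILE, TILE) and dim3 grid((N + TILE - 1) / TILE, (N + TILE - 1) / TILE).
__global__ void transpose_tiled(const double *a, double *b, int N)
{
  __shared__ double tile[TILE][TILE + 1];     // +1 column sidesteps shared-memory bank conflicts

  int x = blockIdx.x * TILE + threadIdx.x;    // coalesced read from a
  int y = blockIdx.y * TILE + threadIdx.y;
  if (x < N && y < N)
    tile[threadIdx.y][threadIdx.x] = a[y * N + x];
  __syncthreads();

  x = blockIdx.y * TILE + threadIdx.x;        // swap the block indices so the
  y = blockIdx.x * TILE + threadIdx.y;        // write to b is coalesced as well
  if (x < N && y < N)
    b[y * N + x] = tile[threadIdx.x][threadIdx.y];
}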
679c32547af709213c599d6fbee6be9db3ea5673.hip
// !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//fail: data-race, all the threads write on A[0]
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <sm_atomic_functions.h>

#define N 2

__global__ void race_test (unsigned int* i, int* A) {
  int tid = threadIdx.x;
  int j = atomicAdd(i,0);
  A[j] = tid;
}

int main(){
  unsigned int *i;
  int *A;
  unsigned int *dev_i;
  int *dev_A;

  A = (int*)malloc(N*sizeof(int));
  for (int t = 0; t < N; ++t){
    A[t] = 11;
    printf(" %d ", A[t]);
  }

  i = (unsigned int*)malloc(sizeof(unsigned int));
  *i = 0;

  hipMalloc((void**)&dev_A, N*sizeof(int));
  hipMalloc((void**)&dev_i, sizeof(unsigned int));

  hipMemcpy(dev_A, A, N*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_i, i, sizeof(unsigned int), hipMemcpyHostToDevice);

  //race_test<<<1,N>>>(dev_i, dev_A);
  ESBMC_verify_kernel_u(race_test,1,N,dev_i,dev_A);

  hipMemcpy(A, dev_A, N*sizeof(int), hipMemcpyDeviceToHost);

  for (int t=0; t<N;t++){
    printf("A[%d]=%d; ", t, A[t]);
  }

  //assert(A[0] == 11);
  assert(A[0] == 0 || A[0] == 1); // A[0] == i,where i = [0,N-1]

  free(A);
  free(i);
  hipFree(dev_A);
  hipFree(dev_i);

  return 0;
}
679c32547af709213c599d6fbee6be9db3ea5673.cu
#include <call_kernel.h>
//fail: data-race, all the threads write on A[0]
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <sm_atomic_functions.h>

#define N 2

__global__ void race_test (unsigned int* i, int* A) {
  int tid = threadIdx.x;
  int j = atomicAdd(i,0);
  A[j] = tid;
}

int main(){
  unsigned int *i;
  int *A;
  unsigned int *dev_i;
  int *dev_A;

  A = (int*)malloc(N*sizeof(int));
  for (int t = 0; t < N; ++t){
    A[t] = 11;
    printf(" %d ", A[t]);
  }

  i = (unsigned int*)malloc(sizeof(unsigned int));
  *i = 0;

  cudaMalloc((void**)&dev_A, N*sizeof(int));
  cudaMalloc((void**)&dev_i, sizeof(unsigned int));

  cudaMemcpy(dev_A, A, N*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_i, i, sizeof(unsigned int), cudaMemcpyHostToDevice);

  //race_test<<<1,N>>>(dev_i, dev_A);
  ESBMC_verify_kernel_u(race_test,1,N,dev_i,dev_A);

  cudaMemcpy(A, dev_A, N*sizeof(int), cudaMemcpyDeviceToHost);

  for (int t=0; t<N;t++){
    printf("A[%d]=%d; ", t, A[t]);
  }

  //assert(A[0] == 11);
  assert(A[0] == 0 || A[0] == 1); // A[0] == i,where i = [0,N-1]

  free(A);
  free(i);
  cudaFree(dev_A);
  cudaFree(dev_i);

  return 0;
}
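// Editor's note, not part of the dataset entry above: atomicAdd(i, 0) only reads *i,
// so every thread in the entry's race_test writes A[0] -- that is the intended data
// race for the verifier. For contrast, a hypothetical race-free counterpart lets
// atomicAdd(i, 1) hand each thread a distinct slot (assuming the same host setup:
// N threads and *i initialized to 0):
__global__ void race_free_test(unsigned int *i, int *A)
{
    int tid = threadIdx.x;
    unsigned int j = atomicAdd(i, 1);   // returns the old value, unique per thread
    A[j] = tid;                         // each thread writes its own element of A
}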
780712b6bce99c4a5bc67f291cf912a44aec046f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Nearest neighbor search * * * * O(N) * GPU */ #include <stdio.h> #include <stdlib.h> #include <vector> #include <list> #include <time.h> #define CITY_SIZE 200 #define NUM_GPU_BLOCKS 4 #define NUM_GPU_THREADS 128 #define NUM_FEATURES 5 #define QUEUE_SIZE 5000 #define CUDA_CALL(x) {if((x) != hipSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", hipGetErrorString(hipGetLastError())); \ exit(EXIT_FAILURE);}} struct ZoneType { int type; int level; }; struct ZoningPlan { ZoneType zones[CITY_SIZE][CITY_SIZE]; }; struct DistanceMap { int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES]; }; struct Point2D { int x; int y; __host__ __device__ Point2D() : x(0), y(0) {} __host__ __device__ Point2D(int x, int y) : x(x), y(y) {} }; struct Point2DAndFeature { int x; int y; int featureId; __host__ __device__ Point2DAndFeature() : x(0), y(0), featureId(0) {} __host__ __device__ Point2DAndFeature(int x, int y, int featureId) : x(x), y(y), featureId(featureId) {} }; __host__ __device__ unsigned int rand(unsigned int* randx) { *randx = *randx * 1103515245 + 12345; return (*randx)&2147483647; } __host__ __device__ float randf(unsigned int* randx) { return rand(randx) / (float(2147483647) + 1); } __host__ __device__ float randf(unsigned int* randx, float a, float b) { return randf(randx) * (b - a) + a; } __host__ __device__ int sampleFromCdf(unsigned int* randx, float* cdf, int num) { float rnd = randf(randx, 0, cdf[num-1]); for (int i = 0; i < num; ++i) { if (rnd <= cdf[i]) return i; } return num - 1; } __host__ __device__ int sampleFromPdf(unsigned int* randx, float* pdf, int num) { if (num == 0) return 0; float cdf[40]; cdf[0] = pdf[0]; for (int i = 1; i < num; ++i) { if (pdf[i] >= 0) { cdf[i] = cdf[i - 1] + pdf[i]; } else { cdf[i] = cdf[i - 1]; } } return sampleFromCdf(randx, cdf, num); } /** * */ __host__ void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution) { std::vector<float> numRemainings(zoneTypeDistribution.size()); for (int i = 0; i < zoneTypeDistribution.size(); ++i) { numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i]; } unsigned int randx = 0; for (int r = 0; r < CITY_SIZE; ++r) { for (int c = 0; c < CITY_SIZE; ++c) { int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size()); zoningPlan.zones[r][c].type = type; numRemainings[type] -= 1; } } } __global__ void initDistance(ZoningPlan* zoningPlan, DistanceMap* distanceMap, Point2DAndFeature* queue, int* queueEnd) { int idx = blockDim.x * blockIdx.x + threadIdx.x; queueEnd[idx] = 0; int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS); // for (int i = 0; i < stride; ++i) { int r = (idx * stride + i) / CITY_SIZE; int c = (idx * stride + i) % CITY_SIZE; if (r >= CITY_SIZE) continue; for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) { if (zoningPlan->zones[r][c].type - 1 == feature_id) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(c, r, feature_id); distanceMap->distances[r][c][feature_id] = 0; } else { distanceMap->distances[r][c][feature_id] = 9999; } } } } /** * */ __global__ void computeDistanceToStore(ZoningPlan* zoningPlan, DistanceMap* distanceMap, Point2DAndFeature* queue, int* queueEnd) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int queue_begin = 0; // while (queue_begin < queueEnd[idx]) { Point2DAndFeature pt = queue[idx * QUEUE_SIZE + queue_begin++]; if (queue_begin >= QUEUE_SIZE) 
queue_begin = 0; int d = distanceMap->distances[pt.y][pt.x][pt.featureId]; if (pt.y > 0) { int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x, pt.y-1, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } if (pt.y < CITY_SIZE - 1) { int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x, pt.y+1, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } if (pt.x > 0) { int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x-1, pt.y, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } if (pt.x < CITY_SIZE - 1) { int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x+1, pt.y, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } } } /** * CPU */ __host__ void computeDistanceToStoreCPU(ZoningPlan* zoningPLan, DistanceMap* distanceMap) { std::list<Point2DAndFeature> queue; for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) { for (int cell_id = 0; cell_id < CITY_SIZE * CITY_SIZE; ++cell_id) { int r = cell_id / CITY_SIZE; int c = cell_id % CITY_SIZE; if (zoningPLan->zones[r][c].type - 1== feature_id) { queue.push_back(Point2DAndFeature(c, r, feature_id)); distanceMap->distances[r][c][feature_id] = 0; } else { distanceMap->distances[r][c][feature_id] = 9999; } } } while (!queue.empty()) { Point2DAndFeature pt = queue.front(); queue.pop_front(); int d = distanceMap->distances[pt.y][pt.x][pt.featureId]; if (pt.y > 0) { if (distanceMap->distances[pt.y-1][pt.x][pt.featureId] > d + 1) { distanceMap->distances[pt.y-1][pt.x][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x, pt.y-1, pt.featureId)); } } if (pt.y < CITY_SIZE - 1) { if (distanceMap->distances[pt.y+1][pt.x][pt.featureId] > d + 1) { distanceMap->distances[pt.y+1][pt.x][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x, pt.y+1, pt.featureId)); } } if (pt.x > 0) { if (distanceMap->distances[pt.y][pt.x-1][pt.featureId] > d + 1) { distanceMap->distances[pt.y][pt.x-1][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x-1, pt.y, pt.featureId)); } } if (pt.x < CITY_SIZE - 1) { if (distanceMap->distances[pt.y][pt.x+1][pt.featureId] > d + 1) { distanceMap->distances[pt.y][pt.x+1][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x+1, pt.y, pt.featureId)); } } } } int main() { time_t start, end; ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan)); DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap)); DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap)); // memset(hostDistanceMap, 9999, sizeof(DistanceMap)); memset(hostDistanceMap2, 9999, sizeof(DistanceMap)); std::vector<float> zoneTypeDistribution(6); zoneTypeDistribution[0] = 0.5f; zoneTypeDistribution[1] = 0.2f; zoneTypeDistribution[2] = 0.1f; zoneTypeDistribution[3] = 0.1f; zoneTypeDistribution[4] = 0.05f; zoneTypeDistribution[5] = 0.05f; // start = clock(); generateZoningPlan(*hostZoningPlan, zoneTypeDistribution); end = clock(); printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC); // if (CITY_SIZE <= 100) { for (int r = CITY_SIZE - 1; r >= 0; --r) { for 
(int c = 0; c < CITY_SIZE; ++c) { printf("%d, ", hostZoningPlan->zones[r][c].type); } printf("\n"); } printf("\n"); } // ZoningPlan* devZoningPlan; CUDA_CALL(hipMalloc((void**)&devZoningPlan, sizeof(ZoningPlan))); CUDA_CALL(hipMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), hipMemcpyHostToDevice)); // DistanceMap* devDistanceMap; CUDA_CALL(hipMalloc((void**)&devDistanceMap, sizeof(DistanceMap))); // Point2DAndFeature* devQueue; CUDA_CALL(hipMalloc((void**)&devQueue, sizeof(Point2DAndFeature) * QUEUE_SIZE * NUM_GPU_BLOCKS * NUM_GPU_THREADS)); int* devQueueEnd; CUDA_CALL(hipMalloc((void**)&devQueueEnd, sizeof(int) * NUM_GPU_BLOCKS * NUM_GPU_THREADS)); /////////////////////////////////////////////////////////////////////// // CPU start = clock(); for (int iter = 0; iter < 1000; ++iter) { computeDistanceToStoreCPU(hostZoningPlan, hostDistanceMap2); } end = clock(); printf("computeDistanceToStore CPU: %lf\n", (double)(end-start)/CLOCKS_PER_SEC); /////////////////////////////////////////////////////////////////////// // float elapsed1 = 0.0f; float elapsed2 = 0.0f; for (int iter = 0; iter < 1000; ++iter) { start = clock(); hipLaunchKernelGGL(( initDistance), dim3(NUM_GPU_BLOCKS), dim3(NUM_GPU_THREADS), 0, 0, devZoningPlan, devDistanceMap, devQueue, devQueueEnd); hipDeviceSynchronize(); end = clock(); elapsed1 += (double)(end-start)/CLOCKS_PER_SEC; start = clock(); hipLaunchKernelGGL(( computeDistanceToStore), dim3(NUM_GPU_BLOCKS), dim3(NUM_GPU_THREADS), 0, 0, devZoningPlan, devDistanceMap, devQueue, devQueueEnd); hipDeviceSynchronize(); end = clock(); elapsed2 += (double)(end-start)/CLOCKS_PER_SEC; } printf("computeDistanceToStore: initDistance = %lf, updateDistance = %lf\n", elapsed1, elapsed2); // CPU CUDA_CALL(hipMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), hipMemcpyDeviceToHost)); // CPU int bad_k = 0; bool err = false; { for (int r = CITY_SIZE - 1; r >= 0 && !err; --r) { for (int c = 0; c < CITY_SIZE && !err; ++c) { for (int k = 0; k < NUM_FEATURES && !err; ++k) { if (hostDistanceMap->distances[r][c][k] != hostDistanceMap2->distances[r][c][k]) { err = true; printf("ERROR! %d,%d k=%d, %d != %d\n", r, c, k, hostDistanceMap->distances[r][c][k], hostDistanceMap2->distances[r][c][k]); bad_k = k; } } } } } // if (CITY_SIZE <= 100 && err) { for (int r = CITY_SIZE - 1; r >= 0; --r) { for (int c = 0; c < CITY_SIZE; ++c) { printf("%d, ", hostDistanceMap->distances[r][c][bad_k]); } printf("\n"); } printf("\n"); for (int r = CITY_SIZE - 1; r >= 0; --r) { for (int c = 0; c < CITY_SIZE; ++c) { printf("%d, ", hostDistanceMap2->distances[r][c][bad_k]); } printf("\n"); } printf("\n"); } // hipFree(devZoningPlan); hipFree(devDistanceMap); // CPU free(hostZoningPlan); free(hostDistanceMap); free(hostDistanceMap2); hipDeviceReset(); }
780712b6bce99c4a5bc67f291cf912a44aec046f.cu
/** * Nearest neighbor search * マップ内に、店、工場などのゾーンがある確率で配備されている時、 * 住宅ゾーンから直近の店、工場までのマンハッタン距離を計算する。 * * 各店、工場から周辺に再帰的に距離を更新していくので、O(N)で済む。 * しかも、GPUで並列化することで、さらに計算時間を短縮できる。 */ #include <stdio.h> #include <stdlib.h> #include <vector> #include <list> #include <time.h> #define CITY_SIZE 200 #define NUM_GPU_BLOCKS 4 #define NUM_GPU_THREADS 128 #define NUM_FEATURES 5 #define QUEUE_SIZE 5000 #define CUDA_CALL(x) {if((x) != cudaSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \ exit(EXIT_FAILURE);}} struct ZoneType { int type; int level; }; struct ZoningPlan { ZoneType zones[CITY_SIZE][CITY_SIZE]; }; struct DistanceMap { int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES]; }; struct Point2D { int x; int y; __host__ __device__ Point2D() : x(0), y(0) {} __host__ __device__ Point2D(int x, int y) : x(x), y(y) {} }; struct Point2DAndFeature { int x; int y; int featureId; __host__ __device__ Point2DAndFeature() : x(0), y(0), featureId(0) {} __host__ __device__ Point2DAndFeature(int x, int y, int featureId) : x(x), y(y), featureId(featureId) {} }; __host__ __device__ unsigned int rand(unsigned int* randx) { *randx = *randx * 1103515245 + 12345; return (*randx)&2147483647; } __host__ __device__ float randf(unsigned int* randx) { return rand(randx) / (float(2147483647) + 1); } __host__ __device__ float randf(unsigned int* randx, float a, float b) { return randf(randx) * (b - a) + a; } __host__ __device__ int sampleFromCdf(unsigned int* randx, float* cdf, int num) { float rnd = randf(randx, 0, cdf[num-1]); for (int i = 0; i < num; ++i) { if (rnd <= cdf[i]) return i; } return num - 1; } __host__ __device__ int sampleFromPdf(unsigned int* randx, float* pdf, int num) { if (num == 0) return 0; float cdf[40]; cdf[0] = pdf[0]; for (int i = 1; i < num; ++i) { if (pdf[i] >= 0) { cdf[i] = cdf[i - 1] + pdf[i]; } else { cdf[i] = cdf[i - 1]; } } return sampleFromCdf(randx, cdf, num); } /** * ゾーンプランを生成する。 */ __host__ void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution) { std::vector<float> numRemainings(zoneTypeDistribution.size()); for (int i = 0; i < zoneTypeDistribution.size(); ++i) { numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i]; } unsigned int randx = 0; for (int r = 0; r < CITY_SIZE; ++r) { for (int c = 0; c < CITY_SIZE; ++c) { int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size()); zoningPlan.zones[r][c].type = type; numRemainings[type] -= 1; } } } __global__ void initDistance(ZoningPlan* zoningPlan, DistanceMap* distanceMap, Point2DAndFeature* queue, int* queueEnd) { int idx = blockDim.x * blockIdx.x + threadIdx.x; queueEnd[idx] = 0; int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS); // 分割された領域内で、店を探す for (int i = 0; i < stride; ++i) { int r = (idx * stride + i) / CITY_SIZE; int c = (idx * stride + i) % CITY_SIZE; if (r >= CITY_SIZE) continue; for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) { if (zoningPlan->zones[r][c].type - 1 == feature_id) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(c, r, feature_id); distanceMap->distances[r][c][feature_id] = 0; } else { distanceMap->distances[r][c][feature_id] = 9999; } } } } /** * 直近の店までの距離を計算する(マルチスレッド版) */ __global__ void computeDistanceToStore(ZoningPlan* zoningPlan, DistanceMap* distanceMap, Point2DAndFeature* queue, int* queueEnd) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int queue_begin = 0; // 距離マップを生成 while (queue_begin < queueEnd[idx]) 
{ Point2DAndFeature pt = queue[idx * QUEUE_SIZE + queue_begin++]; if (queue_begin >= QUEUE_SIZE) queue_begin = 0; int d = distanceMap->distances[pt.y][pt.x][pt.featureId]; if (pt.y > 0) { int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x, pt.y-1, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } if (pt.y < CITY_SIZE - 1) { int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x, pt.y+1, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } if (pt.x > 0) { int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x-1, pt.y, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } if (pt.x < CITY_SIZE - 1) { int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][pt.featureId], d + 1); if (old > d + 1) { queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x+1, pt.y, pt.featureId); if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0; } } } } /** * 直近の店までの距離を計算する(CPU版) */ __host__ void computeDistanceToStoreCPU(ZoningPlan* zoningPLan, DistanceMap* distanceMap) { std::list<Point2DAndFeature> queue; for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) { for (int cell_id = 0; cell_id < CITY_SIZE * CITY_SIZE; ++cell_id) { int r = cell_id / CITY_SIZE; int c = cell_id % CITY_SIZE; if (zoningPLan->zones[r][c].type - 1== feature_id) { queue.push_back(Point2DAndFeature(c, r, feature_id)); distanceMap->distances[r][c][feature_id] = 0; } else { distanceMap->distances[r][c][feature_id] = 9999; } } } while (!queue.empty()) { Point2DAndFeature pt = queue.front(); queue.pop_front(); int d = distanceMap->distances[pt.y][pt.x][pt.featureId]; if (pt.y > 0) { if (distanceMap->distances[pt.y-1][pt.x][pt.featureId] > d + 1) { distanceMap->distances[pt.y-1][pt.x][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x, pt.y-1, pt.featureId)); } } if (pt.y < CITY_SIZE - 1) { if (distanceMap->distances[pt.y+1][pt.x][pt.featureId] > d + 1) { distanceMap->distances[pt.y+1][pt.x][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x, pt.y+1, pt.featureId)); } } if (pt.x > 0) { if (distanceMap->distances[pt.y][pt.x-1][pt.featureId] > d + 1) { distanceMap->distances[pt.y][pt.x-1][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x-1, pt.y, pt.featureId)); } } if (pt.x < CITY_SIZE - 1) { if (distanceMap->distances[pt.y][pt.x+1][pt.featureId] > d + 1) { distanceMap->distances[pt.y][pt.x+1][pt.featureId] = d + 1; queue.push_back(Point2DAndFeature(pt.x+1, pt.y, pt.featureId)); } } } } int main() { time_t start, end; ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan)); DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap)); DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap)); // 距離を初期化 memset(hostDistanceMap, 9999, sizeof(DistanceMap)); memset(hostDistanceMap2, 9999, sizeof(DistanceMap)); std::vector<float> zoneTypeDistribution(6); zoneTypeDistribution[0] = 0.5f; zoneTypeDistribution[1] = 0.2f; zoneTypeDistribution[2] = 0.1f; zoneTypeDistribution[3] = 0.1f; zoneTypeDistribution[4] = 0.05f; zoneTypeDistribution[5] = 0.05f; // 初期プランを生成 start = clock(); generateZoningPlan(*hostZoningPlan, zoneTypeDistribution); end = clock(); 
printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC); // デバッグ用 if (CITY_SIZE <= 100) { for (int r = CITY_SIZE - 1; r >= 0; --r) { for (int c = 0; c < CITY_SIZE; ++c) { printf("%d, ", hostZoningPlan->zones[r][c].type); } printf("\n"); } printf("\n"); } // 初期プランをデバイスバッファへコピー ZoningPlan* devZoningPlan; CUDA_CALL(cudaMalloc((void**)&devZoningPlan, sizeof(ZoningPlan))); CUDA_CALL(cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice)); // 距離マップ用に、デバイスバッファを確保 DistanceMap* devDistanceMap; CUDA_CALL(cudaMalloc((void**)&devDistanceMap, sizeof(DistanceMap))); // キュー用にデバイスバッファを確保 Point2DAndFeature* devQueue; CUDA_CALL(cudaMalloc((void**)&devQueue, sizeof(Point2DAndFeature) * QUEUE_SIZE * NUM_GPU_BLOCKS * NUM_GPU_THREADS)); int* devQueueEnd; CUDA_CALL(cudaMalloc((void**)&devQueueEnd, sizeof(int) * NUM_GPU_BLOCKS * NUM_GPU_THREADS)); /////////////////////////////////////////////////////////////////////// // CPU版で、直近の店までの距離を計算 start = clock(); for (int iter = 0; iter < 1000; ++iter) { computeDistanceToStoreCPU(hostZoningPlan, hostDistanceMap2); } end = clock(); printf("computeDistanceToStore CPU: %lf\n", (double)(end-start)/CLOCKS_PER_SEC); /////////////////////////////////////////////////////////////////////// // マルチスレッドで、直近の店までの距離を計算 float elapsed1 = 0.0f; float elapsed2 = 0.0f; for (int iter = 0; iter < 1000; ++iter) { start = clock(); initDistance<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap, devQueue, devQueueEnd); cudaDeviceSynchronize(); end = clock(); elapsed1 += (double)(end-start)/CLOCKS_PER_SEC; start = clock(); computeDistanceToStore<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap, devQueue, devQueueEnd); cudaDeviceSynchronize(); end = clock(); elapsed2 += (double)(end-start)/CLOCKS_PER_SEC; } printf("computeDistanceToStore: initDistance = %lf, updateDistance = %lf\n", elapsed1, elapsed2); // 距離をCPUバッファへコピー CUDA_CALL(cudaMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost)); // CPU版とマルチスレッドの結果を比較 int bad_k = 0; bool err = false; { for (int r = CITY_SIZE - 1; r >= 0 && !err; --r) { for (int c = 0; c < CITY_SIZE && !err; ++c) { for (int k = 0; k < NUM_FEATURES && !err; ++k) { if (hostDistanceMap->distances[r][c][k] != hostDistanceMap2->distances[r][c][k]) { err = true; printf("ERROR! %d,%d k=%d, %d != %d\n", r, c, k, hostDistanceMap->distances[r][c][k], hostDistanceMap2->distances[r][c][k]); bad_k = k; } } } } } // デバッグ用 if (CITY_SIZE <= 100 && err) { for (int r = CITY_SIZE - 1; r >= 0; --r) { for (int c = 0; c < CITY_SIZE; ++c) { printf("%d, ", hostDistanceMap->distances[r][c][bad_k]); } printf("\n"); } printf("\n"); for (int r = CITY_SIZE - 1; r >= 0; --r) { for (int c = 0; c < CITY_SIZE; ++c) { printf("%d, ", hostDistanceMap2->distances[r][c][bad_k]); } printf("\n"); } printf("\n"); } // デバイスバッファの開放 cudaFree(devZoningPlan); cudaFree(devDistanceMap); // CPUバッファの開放 free(hostZoningPlan); free(hostDistanceMap); free(hostDistanceMap2); cudaDeviceReset(); }
test1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <thrust/version.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> //#include <thrust/fill.h> // examples // [1] https://thrust.github.io/doc/group__transformed__reductions_ga0d4232a9685675f488c3cc847111e48d.html template <typename T> struct summary_stats_data { T n, mean, M2; // min, max, void initialize() { n = mean = M2 = 0; } T stdev() { return std::sqrt( M2 / n ); } }; // stats_unary_op is a functor that takes in a value x and // returns a variace_data whose mean value is initialized to x. template <typename T> struct summary_stats_unary_op { __host__ __device__ summary_stats_data<T> operator()(const T& x) const { summary_stats_data<T> result; result.n = 1; result.mean = x; result.M2 = 0; return result; } }; // summary_stats_binary_op is a functor that accepts two summary_stats_data // structs and returns a new summary_stats_data which are an // approximation to the summary_stats for // all values that have been agregated so far template <typename T> struct summary_stats_binary_op : public thrust::binary_function<const summary_stats_data<T>&, const summary_stats_data<T>&, summary_stats_data<T> > { __host__ __device__ summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const { summary_stats_data<T> result; // precompute some common subexpressions T n = x.n + y.n; T delta = y.mean - x.mean; T delta2 = delta * delta; //Basic number of samples (n), min, and max result.n = n; result.mean = x.mean + delta * y.n / n; result.M2 = x.M2 + y.M2; result.M2 += delta2 * x.n * y.n / n; return result; } }; void thrust_transform_reduce(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize); void my_version(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize); __global__ void my_transform_reduce(float *g_input, float *g_out_mean, float *g_out_stdev, const int len, const int warpNum ); int main(void) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); printf("Device name: %s\n", prop.name); hipSetDevice(0); int major = THRUST_MAJOR_VERSION; int minor = THRUST_MINOR_VERSION; std::cout << "Thrust v" << major << "." 
<< minor << std::endl; //-----------------------------------------------------------------------// std::cout << "initialize data on the host\n"; int bcols = 3; int bsize = 4; int edge_buf_size = bcols * bsize; thrust::host_vector<float> h_edge_buf(edge_buf_size); for(int row=0; row<bsize; row++) { for(int col=0; col<bcols; col++) { h_edge_buf[row * bcols + col] = (float)(col + 1); std::cout << h_edge_buf[row * bcols + col] << " "; } std::cout << std::endl; } std::cout << std::endl; //-----------------------------------------------------------------------// std::cout << "\ncopy host to device\n"; thrust::device_vector<float> d_edge_buf=h_edge_buf; //-----------------------------------------------------------------------// std::cout << "\ntesting thrust::transform_reduce\n"; thrust_transform_reduce(d_edge_buf, bcols, bsize); //-----------------------------------------------------------------------// std::cout << "\ntesting customized version \n"; my_version(d_edge_buf, bcols, bsize); return 0; } void thrust_transform_reduce(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float local_ms = 0.f; summary_stats_unary_op<float> unary_op; summary_stats_binary_op<float> binary_op; summary_stats_data<float> stat_init; summary_stats_data<float> stat_result; thrust::host_vector<float> h_means(1,0.f); thrust::host_vector<float> h_stdevs(1,0.f); stat_init.initialize(); hipEventRecord(start, 0); // gpu code stat_result = thrust::transform_reduce(d_edge_buf.begin(), d_edge_buf.end(), unary_op, stat_init, binary_op); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&local_ms, start, stop); printf("(thrust::transform_reduce) runtime = %lf (ms)\n", local_ms); h_means[0] = stat_result.mean; h_stdevs[0] = stat_result.stdev(); std::cout << "means = " << h_means[0] << std::endl; std::cout << "stdev = " << h_stdevs[0] << std::endl; hipEventDestroy(start); hipEventDestroy(stop); } void my_version(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float local_ms = 0.f; thrust::device_vector<float> d_means(1); thrust::device_vector<float> d_stdevs(1); thrust::host_vector<float> h_means(1,0.f); thrust::host_vector<float> h_stdevs(1,0.f); float *g_input = thrust::raw_pointer_cast(d_edge_buf.data()); float *g_out_mean = thrust::raw_pointer_cast(d_means.data()); float *g_out_stdev = thrust::raw_pointer_cast(d_stdevs.data()); int len = bcols * bsize; int warpNum = ( len + 31 ) / 32; int Blk = warpNum * 32; hipEventRecord(start, 0); hipLaunchKernelGGL(( my_transform_reduce) , dim3(1), dim3(Blk) , 0, 0, g_input, g_out_mean, g_out_stdev, len, warpNum ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&local_ms, start, stop); printf("(my version) runtime = %lf (ms)\n", local_ms); h_means = d_means; h_stdevs= d_stdevs; //---------------------// // check output //---------------------// std::cout << "\noutput vals\n"; std::cout << "means = " << h_means[0] << std::endl; std::cout << "stdev = " << h_stdevs[0] << std::endl; } __global__ void my_transform_reduce(float *g_input, float *g_out_mean, float *g_out_stdev, const int len, const int warpNum ) { __shared__ float sm_val[32]; // max 32 waps __shared__ float sm_d2[32]; __shared__ float sm_mean; int lid = threadIdx.x; //--------------// // compute mean //--------------// float v = 0.f; float in_x = 0.f; if(lid < len) { in_x = v = g_input[lid]; // v for 
reduction, in_x for later use } int lane_id = lid % 32; int warp_id = lid / 32; for(int i= 16; i>0; i>>=1) { v += __shfl_down_sync(0xFFFFFFFF, v, i, 32); } if(lane_id == 0) { sm_val[warp_id] = v; } __syncthreads(); if(lid == 0) { float sum = 0.f; for(int i=0;i<warpNum;i++) { sum += sm_val[i]; } float result_mean = sum / float(len); g_out_mean[0] = result_mean; // compute mean sm_mean = result_mean; // update mean to shared mem } __syncthreads(); //--------------// // compute stdev //--------------// float diff2 = 0.f; if(lid < len) { float dif = in_x - sm_mean; // xi - u diff2 = dif * dif; } for(int i= 16; i>0; i>>=1) { diff2 += __shfl_down_sync(0xFFFFFFFF, diff2, i, 32); } if(lane_id == 0) { sm_d2[warp_id] = diff2; } __syncthreads(); if(lid == 0) { float stdev = 0.f; for(int i=0;i<warpNum;i++) { stdev += sm_d2[i]; } g_out_stdev[0] = sqrtf(stdev / float(len)); } }
test1.cu
#include <iostream> #include <thrust/version.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> //#include <thrust/fill.h> // examples // [1] https://thrust.github.io/doc/group__transformed__reductions_ga0d4232a9685675f488c3cc847111e48d.html template <typename T> struct summary_stats_data { T n, mean, M2; // min, max, void initialize() { n = mean = M2 = 0; } T stdev() { return std::sqrt( M2 / n ); } }; // stats_unary_op is a functor that takes in a value x and // returns a variace_data whose mean value is initialized to x. template <typename T> struct summary_stats_unary_op { __host__ __device__ summary_stats_data<T> operator()(const T& x) const { summary_stats_data<T> result; result.n = 1; result.mean = x; result.M2 = 0; return result; } }; // summary_stats_binary_op is a functor that accepts two summary_stats_data // structs and returns a new summary_stats_data which are an // approximation to the summary_stats for // all values that have been agregated so far template <typename T> struct summary_stats_binary_op : public thrust::binary_function<const summary_stats_data<T>&, const summary_stats_data<T>&, summary_stats_data<T> > { __host__ __device__ summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const { summary_stats_data<T> result; // precompute some common subexpressions T n = x.n + y.n; T delta = y.mean - x.mean; T delta2 = delta * delta; //Basic number of samples (n), min, and max result.n = n; result.mean = x.mean + delta * y.n / n; result.M2 = x.M2 + y.M2; result.M2 += delta2 * x.n * y.n / n; return result; } }; void thrust_transform_reduce(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize); void my_version(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize); __global__ void my_transform_reduce(float *g_input, float *g_out_mean, float *g_out_stdev, const int len, const int warpNum ); int main(void) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); printf("Device name: %s\n", prop.name); cudaSetDevice(0); int major = THRUST_MAJOR_VERSION; int minor = THRUST_MINOR_VERSION; std::cout << "Thrust v" << major << "." 
<< minor << std::endl; //-----------------------------------------------------------------------// std::cout << "initialize data on the host\n"; int bcols = 3; int bsize = 4; int edge_buf_size = bcols * bsize; thrust::host_vector<float> h_edge_buf(edge_buf_size); for(int row=0; row<bsize; row++) { for(int col=0; col<bcols; col++) { h_edge_buf[row * bcols + col] = (float)(col + 1); std::cout << h_edge_buf[row * bcols + col] << " "; } std::cout << std::endl; } std::cout << std::endl; //-----------------------------------------------------------------------// std::cout << "\ncopy host to device\n"; thrust::device_vector<float> d_edge_buf=h_edge_buf; //-----------------------------------------------------------------------// std::cout << "\ntesting thrust::transform_reduce\n"; thrust_transform_reduce(d_edge_buf, bcols, bsize); //-----------------------------------------------------------------------// std::cout << "\ntesting customized version \n"; my_version(d_edge_buf, bcols, bsize); return 0; } void thrust_transform_reduce(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float local_ms = 0.f; summary_stats_unary_op<float> unary_op; summary_stats_binary_op<float> binary_op; summary_stats_data<float> stat_init; summary_stats_data<float> stat_result; thrust::host_vector<float> h_means(1,0.f); thrust::host_vector<float> h_stdevs(1,0.f); stat_init.initialize(); cudaEventRecord(start, 0); // gpu code stat_result = thrust::transform_reduce(d_edge_buf.begin(), d_edge_buf.end(), unary_op, stat_init, binary_op); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&local_ms, start, stop); printf("(thrust::transform_reduce) runtime = %lf (ms)\n", local_ms); h_means[0] = stat_result.mean; h_stdevs[0] = stat_result.stdev(); std::cout << "means = " << h_means[0] << std::endl; std::cout << "stdev = " << h_stdevs[0] << std::endl; cudaEventDestroy(start); cudaEventDestroy(stop); } void my_version(thrust::device_vector<float> &d_edge_buf, int bcols, int bsize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float local_ms = 0.f; thrust::device_vector<float> d_means(1); thrust::device_vector<float> d_stdevs(1); thrust::host_vector<float> h_means(1,0.f); thrust::host_vector<float> h_stdevs(1,0.f); float *g_input = thrust::raw_pointer_cast(d_edge_buf.data()); float *g_out_mean = thrust::raw_pointer_cast(d_means.data()); float *g_out_stdev = thrust::raw_pointer_cast(d_stdevs.data()); int len = bcols * bsize; int warpNum = ( len + 31 ) / 32; int Blk = warpNum * 32; cudaEventRecord(start, 0); my_transform_reduce <<< 1, Blk >>> (g_input, g_out_mean, g_out_stdev, len, warpNum ); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&local_ms, start, stop); printf("(my version) runtime = %lf (ms)\n", local_ms); h_means = d_means; h_stdevs= d_stdevs; //---------------------// // check output //---------------------// std::cout << "\noutput vals\n"; std::cout << "means = " << h_means[0] << std::endl; std::cout << "stdev = " << h_stdevs[0] << std::endl; } __global__ void my_transform_reduce(float *g_input, float *g_out_mean, float *g_out_stdev, const int len, const int warpNum ) { __shared__ float sm_val[32]; // max 32 waps __shared__ float sm_d2[32]; __shared__ float sm_mean; int lid = threadIdx.x; //--------------// // compute mean //--------------// float v = 0.f; float in_x = 0.f; if(lid < len) { in_x = v = g_input[lid]; // v for reduction, in_x for later 
use } int lane_id = lid % 32; int warp_id = lid / 32; for(int i= 16; i>0; i>>=1) { v += __shfl_down_sync(0xFFFFFFFF, v, i, 32); } if(lane_id == 0) { sm_val[warp_id] = v; } __syncthreads(); if(lid == 0) { float sum = 0.f; for(int i=0;i<warpNum;i++) { sum += sm_val[i]; } float result_mean = sum / float(len); g_out_mean[0] = result_mean; // compute mean sm_mean = result_mean; // update mean to shared mem } __syncthreads(); //--------------// // compute stdev //--------------// float diff2 = 0.f; if(lid < len) { float dif = in_x - sm_mean; // xi - u diff2 = dif * dif; } for(int i= 16; i>0; i>>=1) { diff2 += __shfl_down_sync(0xFFFFFFFF, diff2, i, 32); } if(lane_id == 0) { sm_d2[warp_id] = diff2; } __syncthreads(); if(lid == 0) { float stdev = 0.f; for(int i=0;i<warpNum;i++) { stdev += sm_d2[i]; } g_out_stdev[0] = sqrtf(stdev / float(len)); } }
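// Editor's note, not part of the dataset entry above: my_transform_reduce in this
// entry repeats the same __shfl_down_sync loop twice, once for the sum and once for
// the squared differences. A hypothetical helper that factors out that warp-level
// reduction, with the same semantics as the entry's inline loops:
__device__ __forceinline__ float warp_sum(float v)
{
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xFFFFFFFF, v, offset, 32);
    return v;   // lane 0 of each warp ends up holding the warp's total
}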
bd5eef2aa3d84e5a50ac8698786f77cff17dda4b.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2014 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the CUDA C bindings to OpenGL ES to dynamically modify a vertex buffer using a CUDA C kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with CUDA C 3. Map the VBO for writing from CUDA C 4. Run CUDA C kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL ES Host code */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdarg.h> #include <unistd.h> void error_exit(const char* format, ... ) { va_list args; va_start( args, format ); vfprintf( stderr, format, args ); va_end( args ); exit(1); } #define checkCUDAError() \ { \ hipError_t res = hipGetLastError();\ if (res != hipSuccess)\ {\ fprintf(stderr, "Line %d: CUDA Error: %s\n", \ __LINE__, hipGetErrorString(res));\ hipDeviceReset();\ exit(1);\ }\ } #if 0 #include "graphics_interface.c" #else #include "graphics_interface_egloutput_via_egl.c" #endif #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // includes, cuda #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check //#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop #include <hip/hip_vector_types.h> #define MAX_EPSILON_ERROR 0.0f #define THRESHOLD 0.0f #define REFRESH_DELAY 1 //ms #define GUI_IDLE 0x100 #define GUI_ROTATE 0x101 #define GUI_TRANSLATE 0x102 int gui_mode; //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // OpenGL ES variables and interop with CUDA C GLuint mesh_vao, mesh_vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; float g_fAnim = 0.0; // UI / mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; // Frame statistics int frame; int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; // Auto-Verification Code bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? 
a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward // CUDA functionality void runCuda(struct cudaGraphicsResource **vbo_resource); void runAutoTest(int devID, char **argv, char *ref_file); void checkResultCuda(int argc, char **argv, const GLuint &vbo); const char *sSDKsample = "simpleGLES (VBO)"; void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda/OpenGL ES Interop (VBO): %3.1f fps (Max 1000 fps)", avgFPS); graphics_set_windowtitle(fps); } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; // write output vertex pos[y*width+x] = make_float4(u, w, v, 1.0f); } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); hipLaunchKernelGGL(( simple_vbo_kernel), dim3(grid), dim3(block), 0, 0, pos, mesh_width, mesh_height, time); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { // map OpenGL buffer object for writing from CUDA float4 *dptr; hipGraphicsMapResources(1, vbo_resource, 0); size_t num_bytes; hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource); //printf("Sample CUDA mapped VBO: May access %ld bytes\n", num_bytes); // execute the kernel // dim3 block(8, 8, 1); // dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); // kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim); launch_kernel(dptr, mesh_width, mesh_height, g_fAnim); // unmap buffer object hipGraphicsUnmapResources(1, vbo_resource, 0); } #ifdef _WIN32 #ifndef FOPEN #define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode) #endif #else #ifndef FOPEN #define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode)) #endif #endif void sdkDumpBin2(void *data, unsigned int bytes, const char *filename) { printf("sdkDumpBin: <%s>\n", filename); FILE *fp; FOPEN(fp, filename, "wb"); fwrite(data, bytes, 1, fp); fflush(fp); fclose(fp); } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runAutoTest(int devID, char **argv, char *ref_file) { char *reference_file = NULL; void *imageData = malloc(mesh_width*mesh_height*sizeof(float)); // execute the kernel launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim); hipDeviceSynchronize(); getLastCudaError("launch_kernel failed"); hipMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), hipMemcpyDeviceToHost); sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGL.bin"); reference_file = sdkFindFilePath(ref_file, argv[0]); if (reference_file && !sdkCompareBin2BinFloat("simpleGL.bin", reference_file, mesh_width*mesh_height*sizeof(float), MAX_EPSILON_ERROR, THRESHOLD, pArgv[0])) { g_TotalErrors++; } } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display_thisframe(float time_delta) { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //GET_GLERROR(0); // set view matrix: broken, it doesn't work in OpenGL ES! Must put into shader //glMatrixMode(GL_MODELVIEW); //glLoadIdentity(); //glTranslatef(0.0, 0.0, translate_z); //glRotatef(rotate_x, 1.0, 0.0, 0.0); //glRotatef(rotate_y, 0.0, 1.0, 0.0); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); //GET_GLERROR(0); glFinish(); //GET_GLERROR(0); g_fAnim += time_delta; sdkStopTimer(&timer); computeFPS(); } //////////////////////////////////////////////////////////////////////////////// //! Check if the result is correct or write data to file for external //! 
regression testing //////////////////////////////////////////////////////////////////////////////// void checkResultCuda(int argc, char **argv, const GLuint &vbo) { if (!d_vbo_buffer) { printf("%s: Mapping result buffer from OpenGL ES\n", __FUNCTION__); hipGraphicsUnregisterResource(cuda_vbo_resource); // map buffer object glBindBuffer(GL_ARRAY_BUFFER, vbo); float *data = (float *) glMapBufferRange(GL_ARRAY_BUFFER, 0, mesh_width * mesh_height * 4 * sizeof(float), GL_READ_ONLY); // check result if (checkCmdLineFlag(argc, (const char **) argv, "regression")) { // write file for regression test sdkWriteFile<float>("./data/regression.dat", data, mesh_width * mesh_height * 3, 0.0, false); } // unmap GL buffer object if (!glUnmapBuffer(GL_ARRAY_BUFFER)) { fprintf(stderr, "Unmap buffer failed.\n"); fflush(stderr); } checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, hipGraphicsMapFlagsWriteDiscard)); GET_GLERROR(0); } } GLuint mesh_shader = 0; void readAndCompileShaderFromGLSLFile(GLuint new_shaderprogram, const char *filename, GLenum shaderType) { FILE *file = fopen(filename,"rb"); // open shader text file if (!file) error_exit("Filename %s does not exist\n", filename); /* get the size of the file and read it */ fseek(file,0,SEEK_END); GLint size = ftell(file); char *data = (char*)malloc(sizeof(char)*(size + 1)); memset(data, 0, sizeof(char)*(size + 1)); fseek(file,0,SEEK_SET); size_t res = fread(data,1,size,file); fclose(file); GLuint shader = glCreateShader(shaderType); glShaderSource(shader, 1, (const GLchar**)&data, &size); glCompileShader(shader); GET_GLERROR(0); GLint compile_success = 0; glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_success); GET_GLERROR(0); if (compile_success == GL_FALSE) { printf("Compilation of %s failed!\n Reason:\n", filename); GLint maxLength = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(shader, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); glDeleteShader(shader); exit(1); } glAttachShader(new_shaderprogram, shader); glDeleteShader(shader); // good to do? 
free(data); } GLuint ShaderCreate(const char *vshader_filename, const char *fshader_filename) { printf("Loading GLSL shaders %s %s\n", vshader_filename, fshader_filename); GLuint new_shaderprogram = glCreateProgram(); GET_GLERROR(0); if (vshader_filename) readAndCompileShaderFromGLSLFile(new_shaderprogram, vshader_filename, GL_VERTEX_SHADER); GET_GLERROR(0); if (fshader_filename) readAndCompileShaderFromGLSLFile(new_shaderprogram, fshader_filename, GL_FRAGMENT_SHADER); GET_GLERROR(0); glLinkProgram(new_shaderprogram); GET_GLERROR(0); GLint link_success; glGetProgramiv(new_shaderprogram, GL_LINK_STATUS, &link_success); if (link_success == GL_FALSE) { printf("Linking of %s with %s failed!\n Reason:\n", vshader_filename, fshader_filename); GLint maxLength = 0; glGetShaderiv(new_shaderprogram, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(new_shaderprogram, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); exit(EXIT_FAILURE); } return new_shaderprogram; } //=========================================================================== // InitGraphicsState() - initialize OpenGL //=========================================================================== static void InitGraphicsState(char **argv) { char *GL_version=(char *)glGetString(GL_VERSION); char *GL_vendor=(char *)glGetString(GL_VENDOR); char *GL_renderer=(char *)glGetString(GL_RENDERER); printf("Version: %s\n", GL_version); printf("Vendor: %s\n", GL_vendor); printf("Renderer: %s\n", GL_renderer); // RENDERING SETUP (OpenGL ES or OpenGL Core Profile!) glGenVertexArrays(1, &mesh_vao); // Features' Vertex Array Object allocation glBindVertexArray(mesh_vao); // bind VAO // initialize buffer object glGenBuffers(1, &mesh_vbo); glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo); unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW); glVertexAttribPointer((GLuint)0, 4, GL_FLOAT, GL_FALSE, 0, 0); glEnableVertexAttribArray(0); hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, mesh_vbo, hipGraphicsMapFlagsNone); checkCUDAError(); //glBindVertexArray(0); // keep above Vertex Array Object bound (it's the only one throughout) // GLSL stuff char *vertex_shader_path = sdkFindFilePath("mesh.vert.glsl", argv[0]); char *fragment_shader_path = sdkFindFilePath("mesh.frag.glsl", argv[0]); if (vertex_shader_path == NULL || fragment_shader_path == NULL) { printf("Error finding shader file\n"); exit(EXIT_FAILURE); } mesh_shader = ShaderCreate(vertex_shader_path, fragment_shader_path); GET_GLERROR(0); free(vertex_shader_path); free(fragment_shader_path); glUseProgram(mesh_shader); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, char *ref_file) { // Create the CUTIL timer sdkCreateTimer(&timer); // command line mode only if (ref_file != NULL) { // This will pick the best possible CUDA capable device int devID = findCudaDevice(argc, (const char **)argv); // create VBO checkCudaErrors(hipMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float))); // run the cuda part runAutoTest(devID, argv, ref_file); // check result of Cuda step checkResultCuda(argc, argv, mesh_vbo); hipFree(d_vbo_buffer); d_vbo_buffer = NULL; } else { // this would use command-line specified CUDA device, note that CUDA defaults to highest Gflops/s device if (checkCmdLineFlag(argc, (const char **)argv, "device")) error_exit("Device setting not yet implemented!\n"); // create X11 window and set up associated OpenGL ES context graphics_setup_window(0,0, window_width, window_height, sSDKsample); InitGraphicsState(argv); // set up GLES stuff glClearColor( 0, 0.5, 1, 1 ); // blue-ish background glClear( GL_COLOR_BUFFER_BIT ); //printf("WP%d\n", __LINE__); graphics_swap_buffers(); XEvent event; KeySym key; char text[255]; int frame = 0; while (frame < 1000) { #if 0 if (XPending(display)) { XNextEvent(display, &event); if (event.type==Expose && event.xexpose.count==0) { printf("Redraw requested!\n"); } if (event.type==KeyPress&& XLookupString(&event.xkey,text,255,&key,0)==1) { if (text[0] == 27) goto label_stop_x; printf("You pressed the %c key!\n",text[0]); } if (event.type==ButtonPress) { printf("Mouse button %d press at (%d,%d)\n", event.xbutton.button, event.xbutton.x,event.xbutton.y); if (event.xbutton.button == Button1) gui_mode = GUI_TRANSLATE; if (event.xbutton.button == Button3) gui_mode = GUI_ROTATE; mouse_old_x = event.xbutton.x; mouse_old_y = event.xbutton.y; } if (event.type==ButtonRelease) { printf("Mouse button %d released at (%d,%d)\n", event.xbutton.button, event.xbutton.x,event.xbutton.y); gui_mode = GUI_IDLE; mouse_old_x = event.xbutton.x; mouse_old_y = event.xbutton.y; } if (event.type == MotionNotify) { //printf("Mouse motion towards %d %d, GUI mode is 0x%x\n", // event.xmotion.x, event.xmotion.y, gui_mode); float dx, dy; dx = (float)(event.xmotion.x - mouse_old_x); dy = (float)(event.xmotion.y - mouse_old_y); if (gui_mode == GUI_ROTATE) { rotate_x += dy * 0.2f; rotate_y += dx * 0.2f; printf("rot x %f y %f\n", rotate_x, rotate_y); } if (gui_mode == GUI_TRANSLATE) { translate_z += dy * 0.01f; printf("translate z %f\n", translate_z); } mouse_old_x = event.xmotion.x; mouse_old_y = event.xmotion.y; } } #endif display_thisframe(0.010); usleep(1000); // need not take full CPU and GPU graphics_swap_buffers(); //printf("frame %d\n",frame++); } label_stop_x: // NOTE: Before destroying OpenGL ES context, must unregister all shared resources from CUDA ! 
hipGraphicsUnregisterResource(cuda_vbo_resource); graphics_close_window(); // close window and destroy OpenGL ES context sdkDeleteTimer(&timer); } return true; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char *ref_file = NULL; pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); if (argc > 1) { if (checkCmdLineFlag(argc, (const char **)argv, "file")) { // In this mode, we run without OpenGL and see if VBO is generated correctly getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file); } } printf("\n"); runTest(argc, argv, ref_file); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); }
bd5eef2aa3d84e5a50ac8698786f77cff17dda4b.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2014 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the CUDA C bindings to OpenGL ES to dynamically modify a vertex buffer using a CUDA C kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with CUDA C 3. Map the VBO for writing from CUDA C 4. Run CUDA C kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL ES Host code */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <stdarg.h> #include <unistd.h> void error_exit(const char* format, ... ) { va_list args; va_start( args, format ); vfprintf( stderr, format, args ); va_end( args ); exit(1); } #define checkCUDAError() \ { \ cudaError_t res = cudaGetLastError();\ if (res != cudaSuccess)\ {\ fprintf(stderr, "Line %d: CUDA Error: %s\n", \ __LINE__, cudaGetErrorString(res));\ cudaThreadExit();\ exit(1);\ }\ } #if 0 #include "graphics_interface.c" #else #include "graphics_interface_egloutput_via_egl.c" #endif #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // includes, cuda #include <cuda_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check //#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop #include <vector_types.h> #define MAX_EPSILON_ERROR 0.0f #define THRESHOLD 0.0f #define REFRESH_DELAY 1 //ms #define GUI_IDLE 0x100 #define GUI_ROTATE 0x101 #define GUI_TRANSLATE 0x102 int gui_mode; //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // OpenGL ES variables and interop with CUDA C GLuint mesh_vao, mesh_vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; float g_fAnim = 0.0; // UI / mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; // Frame statistics int frame; int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; // Auto-Verification Code bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? 
a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward // CUDA functionality void runCuda(struct cudaGraphicsResource **vbo_resource); void runAutoTest(int devID, char **argv, char *ref_file); void checkResultCuda(int argc, char **argv, const GLuint &vbo); const char *sSDKsample = "simpleGLES (VBO)"; void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda/OpenGL ES Interop (VBO): %3.1f fps (Max 1000 fps)", avgFPS); graphics_set_windowtitle(fps); } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; // write output vertex pos[y*width+x] = make_float4(u, w, v, 1.0f); } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); simple_vbo_kernel<<< grid, block>>>(pos, mesh_width, mesh_height, time); } //////////////////////////////////////////////////////////////////////////////// //! Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runCuda(struct cudaGraphicsResource **vbo_resource) { // map OpenGL buffer object for writing from CUDA float4 *dptr; cudaGraphicsMapResources(1, vbo_resource, 0); size_t num_bytes; cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource); //printf("Sample CUDA mapped VBO: May access %ld bytes\n", num_bytes); // execute the kernel // dim3 block(8, 8, 1); // dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); // kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim); launch_kernel(dptr, mesh_width, mesh_height, g_fAnim); // unmap buffer object cudaGraphicsUnmapResources(1, vbo_resource, 0); } #ifdef _WIN32 #ifndef FOPEN #define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode) #endif #else #ifndef FOPEN #define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode)) #endif #endif void sdkDumpBin2(void *data, unsigned int bytes, const char *filename) { printf("sdkDumpBin: <%s>\n", filename); FILE *fp; FOPEN(fp, filename, "wb"); fwrite(data, bytes, 1, fp); fflush(fp); fclose(fp); } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runAutoTest(int devID, char **argv, char *ref_file) { char *reference_file = NULL; void *imageData = malloc(mesh_width*mesh_height*sizeof(float)); // execute the kernel launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim); cudaDeviceSynchronize(); getLastCudaError("launch_kernel failed"); cudaMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), cudaMemcpyDeviceToHost); sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGL.bin"); reference_file = sdkFindFilePath(ref_file, argv[0]); if (reference_file && !sdkCompareBin2BinFloat("simpleGL.bin", reference_file, mesh_width*mesh_height*sizeof(float), MAX_EPSILON_ERROR, THRESHOLD, pArgv[0])) { g_TotalErrors++; } } //////////////////////////////////////////////////////////////////////////////// //! Display callback //////////////////////////////////////////////////////////////////////////////// void display_thisframe(float time_delta) { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //GET_GLERROR(0); // set view matrix: broken, it doesn't work in OpenGL ES! Must put into shader //glMatrixMode(GL_MODELVIEW); //glLoadIdentity(); //glTranslatef(0.0, 0.0, translate_z); //glRotatef(rotate_x, 1.0, 0.0, 0.0); //glRotatef(rotate_y, 0.0, 1.0, 0.0); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); //GET_GLERROR(0); glFinish(); //GET_GLERROR(0); g_fAnim += time_delta; sdkStopTimer(&timer); computeFPS(); } //////////////////////////////////////////////////////////////////////////////// //! Check if the result is correct or write data to file for external //! 
regression testing //////////////////////////////////////////////////////////////////////////////// void checkResultCuda(int argc, char **argv, const GLuint &vbo) { if (!d_vbo_buffer) { printf("%s: Mapping result buffer from OpenGL ES\n", __FUNCTION__); cudaGraphicsUnregisterResource(cuda_vbo_resource); // map buffer object glBindBuffer(GL_ARRAY_BUFFER, vbo); float *data = (float *) glMapBufferRange(GL_ARRAY_BUFFER, 0, mesh_width * mesh_height * 4 * sizeof(float), GL_READ_ONLY); // check result if (checkCmdLineFlag(argc, (const char **) argv, "regression")) { // write file for regression test sdkWriteFile<float>("./data/regression.dat", data, mesh_width * mesh_height * 3, 0.0, false); } // unmap GL buffer object if (!glUnmapBuffer(GL_ARRAY_BUFFER)) { fprintf(stderr, "Unmap buffer failed.\n"); fflush(stderr); } checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, cudaGraphicsMapFlagsWriteDiscard)); GET_GLERROR(0); } } GLuint mesh_shader = 0; void readAndCompileShaderFromGLSLFile(GLuint new_shaderprogram, const char *filename, GLenum shaderType) { FILE *file = fopen(filename,"rb"); // open shader text file if (!file) error_exit("Filename %s does not exist\n", filename); /* get the size of the file and read it */ fseek(file,0,SEEK_END); GLint size = ftell(file); char *data = (char*)malloc(sizeof(char)*(size + 1)); memset(data, 0, sizeof(char)*(size + 1)); fseek(file,0,SEEK_SET); size_t res = fread(data,1,size,file); fclose(file); GLuint shader = glCreateShader(shaderType); glShaderSource(shader, 1, (const GLchar**)&data, &size); glCompileShader(shader); GET_GLERROR(0); GLint compile_success = 0; glGetShaderiv(shader, GL_COMPILE_STATUS, &compile_success); GET_GLERROR(0); if (compile_success == GL_FALSE) { printf("Compilation of %s failed!\n Reason:\n", filename); GLint maxLength = 0; glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(shader, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); glDeleteShader(shader); exit(1); } glAttachShader(new_shaderprogram, shader); glDeleteShader(shader); // good to do? 
free(data); } GLuint ShaderCreate(const char *vshader_filename, const char *fshader_filename) { printf("Loading GLSL shaders %s %s\n", vshader_filename, fshader_filename); GLuint new_shaderprogram = glCreateProgram(); GET_GLERROR(0); if (vshader_filename) readAndCompileShaderFromGLSLFile(new_shaderprogram, vshader_filename, GL_VERTEX_SHADER); GET_GLERROR(0); if (fshader_filename) readAndCompileShaderFromGLSLFile(new_shaderprogram, fshader_filename, GL_FRAGMENT_SHADER); GET_GLERROR(0); glLinkProgram(new_shaderprogram); GET_GLERROR(0); GLint link_success; glGetProgramiv(new_shaderprogram, GL_LINK_STATUS, &link_success); if (link_success == GL_FALSE) { printf("Linking of %s with %s failed!\n Reason:\n", vshader_filename, fshader_filename); GLint maxLength = 0; glGetShaderiv(new_shaderprogram, GL_INFO_LOG_LENGTH, &maxLength); char errorLog[maxLength]; glGetShaderInfoLog(new_shaderprogram, maxLength, &maxLength, &errorLog[0]); printf("%s", errorLog); exit(EXIT_FAILURE); } return new_shaderprogram; } //=========================================================================== // InitGraphicsState() - initialize OpenGL //=========================================================================== static void InitGraphicsState(char **argv) { char *GL_version=(char *)glGetString(GL_VERSION); char *GL_vendor=(char *)glGetString(GL_VENDOR); char *GL_renderer=(char *)glGetString(GL_RENDERER); printf("Version: %s\n", GL_version); printf("Vendor: %s\n", GL_vendor); printf("Renderer: %s\n", GL_renderer); // RENDERING SETUP (OpenGL ES or OpenGL Core Profile!) glGenVertexArrays(1, &mesh_vao); // Features' Vertex Array Object allocation glBindVertexArray(mesh_vao); // bind VAO // initialize buffer object glGenBuffers(1, &mesh_vbo); glBindBuffer(GL_ARRAY_BUFFER, mesh_vbo); unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW); glVertexAttribPointer((GLuint)0, 4, GL_FLOAT, GL_FALSE, 0, 0); glEnableVertexAttribArray(0); cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, mesh_vbo, cudaGraphicsMapFlagsNone); checkCUDAError(); //glBindVertexArray(0); // keep above Vertex Array Object bound (it's the only one throughout) // GLSL stuff char *vertex_shader_path = sdkFindFilePath("mesh.vert.glsl", argv[0]); char *fragment_shader_path = sdkFindFilePath("mesh.frag.glsl", argv[0]); if (vertex_shader_path == NULL || fragment_shader_path == NULL) { printf("Error finding shader file\n"); exit(EXIT_FAILURE); } mesh_shader = ShaderCreate(vertex_shader_path, fragment_shader_path); GET_GLERROR(0); free(vertex_shader_path); free(fragment_shader_path); glUseProgram(mesh_shader); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, char *ref_file) { // Create the CUTIL timer sdkCreateTimer(&timer); // command line mode only if (ref_file != NULL) { // This will pick the best possible CUDA capable device int devID = findCudaDevice(argc, (const char **)argv); // create VBO checkCudaErrors(cudaMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float))); // run the cuda part runAutoTest(devID, argv, ref_file); // check result of Cuda step checkResultCuda(argc, argv, mesh_vbo); cudaFree(d_vbo_buffer); d_vbo_buffer = NULL; } else { // this would use command-line specified CUDA device, note that CUDA defaults to highest Gflops/s device if (checkCmdLineFlag(argc, (const char **)argv, "device")) error_exit("Device setting not yet implemented!\n"); // create X11 window and set up associated OpenGL ES context graphics_setup_window(0,0, window_width, window_height, sSDKsample); InitGraphicsState(argv); // set up GLES stuff glClearColor( 0, 0.5, 1, 1 ); // blue-ish background glClear( GL_COLOR_BUFFER_BIT ); //printf("WP%d\n", __LINE__); graphics_swap_buffers(); XEvent event; KeySym key; char text[255]; int frame = 0; while (frame < 1000) { #if 0 if (XPending(display)) { XNextEvent(display, &event); if (event.type==Expose && event.xexpose.count==0) { printf("Redraw requested!\n"); } if (event.type==KeyPress&& XLookupString(&event.xkey,text,255,&key,0)==1) { if (text[0] == 27) goto label_stop_x; printf("You pressed the %c key!\n",text[0]); } if (event.type==ButtonPress) { printf("Mouse button %d press at (%d,%d)\n", event.xbutton.button, event.xbutton.x,event.xbutton.y); if (event.xbutton.button == Button1) gui_mode = GUI_TRANSLATE; if (event.xbutton.button == Button3) gui_mode = GUI_ROTATE; mouse_old_x = event.xbutton.x; mouse_old_y = event.xbutton.y; } if (event.type==ButtonRelease) { printf("Mouse button %d released at (%d,%d)\n", event.xbutton.button, event.xbutton.x,event.xbutton.y); gui_mode = GUI_IDLE; mouse_old_x = event.xbutton.x; mouse_old_y = event.xbutton.y; } if (event.type == MotionNotify) { //printf("Mouse motion towards %d %d, GUI mode is 0x%x\n", // event.xmotion.x, event.xmotion.y, gui_mode); float dx, dy; dx = (float)(event.xmotion.x - mouse_old_x); dy = (float)(event.xmotion.y - mouse_old_y); if (gui_mode == GUI_ROTATE) { rotate_x += dy * 0.2f; rotate_y += dx * 0.2f; printf("rot x %f y %f\n", rotate_x, rotate_y); } if (gui_mode == GUI_TRANSLATE) { translate_z += dy * 0.01f; printf("translate z %f\n", translate_z); } mouse_old_x = event.xmotion.x; mouse_old_y = event.xmotion.y; } } #endif display_thisframe(0.010); usleep(1000); // need not take full CPU and GPU graphics_swap_buffers(); //printf("frame %d\n",frame++); } label_stop_x: // NOTE: Before destroying OpenGL ES context, must unregister all shared resources from CUDA ! 
cudaGraphicsUnregisterResource(cuda_vbo_resource); graphics_close_window(); // close window and destroy OpenGL ES context sdkDeleteTimer(&timer); } return true; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char *ref_file = NULL; pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); if (argc > 1) { if (checkCmdLineFlag(argc, (const char **)argv, "file")) { // In this mode, we run without OpenGL and see if VBO is generated correctly getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file); } } printf("\n"); runTest(argc, argv, ref_file); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); }
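The simpleGLES pair above is a compact illustration of what hipify changes and what it leaves alone: runtime calls are renamed one-for-one (cudaMalloc → hipMalloc, cudaGraphicsMapResources → hipGraphicsMapResources) and the triple-chevron kernel launch becomes hipLaunchKernelGGL, while kernel bodies, GL/EGL code, and helper logic are untouched. Below is a minimal, self-contained sketch of just the launch half of that mapping; the kernel mirrors simple_vbo_kernel, but every name and size here is illustrative rather than copied from the sample.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void sine_wave(float4 *pos, unsigned int width, unsigned int height, float time) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;              // guard for sizes not divisible by the block
    float u = x / (float)width  * 2.0f - 1.0f;
    float v = y / (float)height * 2.0f - 1.0f;
    float w = sinf(u * 4.0f + time) * cosf(v * 4.0f + time) * 0.5f;
    pos[y * width + x] = make_float4(u, w, v, 1.0f);
}

int main() {
    const unsigned int width = 256, height = 256;
    float4 *d_pos = NULL;
    cudaMalloc(&d_pos, width * height * sizeof(float4));

    dim3 block(8, 8, 1);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y, 1);
    sine_wave<<<grid, block>>>(d_pos, width, height, 0.0f);
    // hipify rewrites the launch above as:
    //   hipLaunchKernelGGL(sine_wave, grid, block, 0, 0, d_pos, width, height, 0.0f);
    // where the extra 0, 0 are the dynamic shared-memory size and the stream that the
    // triple-chevron form lets you omit.

    cudaError_t err = cudaDeviceSynchronize();
    printf("launch: %s\n", cudaGetErrorString(err));
    cudaFree(d_pos);
    return 0;
}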
d40b9c11bf66662a39c01fdbd8a1f63954e07870.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef TESTSCRATCH DD.step++; float Sumscratchval=0.0f, Maxscratchval=0.0f; for (int jscratch = 0; jscratch < ASCRATCH; jscratch ++) { val2_scratchpad[jscratch + scrglobal - scratchpad_matrix] = Scratchpad[jscratch]; // scratchpad image validation int delta = jscratch + scrglobal - scratchpad_matrix - lostpixels; // int x = delta%XSCRATCH; int y = delta/YSCRATCH; Sumscratchval += val2_scratchpad[jscratch + scrglobal - scratchpad_matrix]; Maxscratchval = max(Scratchpad[jscratch], Maxscratchval); if(*(Scratchpad + jscratch) != 0.0f && !ithreads) printf("DEVICE: \u2463 SCRATCHPAD distrib_number %d itb %d delta %d position (x*y) (%d*%d) position in scratchpad %d value %f Sum %f max %f\n", distrib_number, itb, delta, (delta-DD.lostpixelsdevice)%(XSCRATCH*tilexdevice), (delta - DD.lostpixelsdevice)/(XSCRATCH*tilexdevice), jscratch, *(Scratchpad + jscratch), Sumscratchval, Maxscratchval); } __syncthreads(); if (!iprint) printf("end \u2463**********DEVICE: SCRATCHPAD *aggregate (%d*%d) *****************\n\n", aggregx, aggregy); __syncthreads(); if(((aggregx+1) == DD.NbAggregx) && ((aggregy+1) == DD.NbAggregy)) { if (!iprint) printf("DEVICE: \u2464 SUM SCRATCHPAD: Sum of scratchpad %5.1f Max of Scratchpad %5.1f \n", Sumscratchval, Maxscratchval); if (!iprint) timer = clock64(); if (!iprint) printf( "DEVICE: \u23f1**DEVICE: step %d TIMING (msec) ** processing %f this step %g total %g \n", DD.step, (float) (timer - time_start) / DD.clockRate, (float) ( time_start - time_init) / DD.clockRate, (float) (timer - time_init) / DD.clockRate); if (!iprint) printf("end \u2464****************DEVICE: SCRATCHPAD & AGGREGATES & TILES ********************\n\n"); } __syncthreads(); #endif
d40b9c11bf66662a39c01fdbd8a1f63954e07870.cu
#ifdef TESTSCRATCH DD.step++; float Sumscratchval=0.0f, Maxscratchval=0.0f; for (int jscratch = 0; jscratch < ASCRATCH; jscratch ++) { val2_scratchpad[jscratch + scrglobal - scratchpad_matrix] = Scratchpad[jscratch]; // scratchpad image validation int delta = jscratch + scrglobal - scratchpad_matrix - lostpixels; // int x = delta%XSCRATCH; int y = delta/YSCRATCH; Sumscratchval += val2_scratchpad[jscratch + scrglobal - scratchpad_matrix]; Maxscratchval = max(Scratchpad[jscratch], Maxscratchval); if(*(Scratchpad + jscratch) != 0.0f && !ithreads) printf("DEVICE: \u2463 SCRATCHPAD distrib_number %d itb %d delta %d position (x*y) (%d*%d) position in scratchpad %d value %f Sum %f max %f\n", distrib_number, itb, delta, (delta-DD.lostpixelsdevice)%(XSCRATCH*tilexdevice), (delta - DD.lostpixelsdevice)/(XSCRATCH*tilexdevice), jscratch, *(Scratchpad + jscratch), Sumscratchval, Maxscratchval); } __syncthreads(); if (!iprint) printf("end \u2463**********DEVICE: SCRATCHPAD *aggregate (%d*%d) *****************\n\n", aggregx, aggregy); __syncthreads(); if(((aggregx+1) == DD.NbAggregx) && ((aggregy+1) == DD.NbAggregy)) { if (!iprint) printf("DEVICE: \u2464 SUM SCRATCHPAD: Sum of scratchpad %5.1f Max of Scratchpad %5.1f \n", Sumscratchval, Maxscratchval); if (!iprint) timer = clock64(); if (!iprint) printf( "DEVICE: \u23f1**DEVICE: step %d TIMING (msec) ** processing %f this step %g total %g \n", DD.step, (float) (timer - time_start) / DD.clockRate, (float) ( time_start - time_init) / DD.clockRate, (float) (timer - time_init) / DD.clockRate); if (!iprint) printf("end \u2464****************DEVICE: SCRATCHPAD & AGGREGATES & TILES ********************\n\n"); } __syncthreads(); #endif
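The TESTSCRATCH fragment above leans on two device-side debugging tools that hipify passes through unchanged: printf from kernel code and clock64() cycle counts scaled by the device clock rate (DD.clockRate, reported by the runtime in kHz, i.e. cycles per millisecond). A minimal sketch of that timing idiom, with every name invented for illustration:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void timed_kernel(float *out, int n, int clock_rate_khz) {
    long long t0 = clock64();                            // cycles on this thread's SM
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        out[i] = sqrtf((float)i);
    long long t1 = clock64();
    if (blockIdx.x == 0 && threadIdx.x == 0)             // report once, like the !iprint guard above
        printf("block 0: %f msec\n", (float)(t1 - t0) / clock_rate_khz);
}

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);                   // prop.clockRate is in kHz
    const int n = 1 << 20;
    float *d_out = NULL;
    cudaMalloc(&d_out, n * sizeof(float));
    timed_kernel<<<64, 256>>>(d_out, n, prop.clockRate);
    cudaDeviceSynchronize();                             // flush device-side printf
    cudaFree(d_out);
    return 0;
}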
baa175b665b54c2d818d7a1bb2956ce68106c1f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void SoftmaxLossForwardGPU(const int_tp nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log( max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } #endif // USE_ROCM template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype normalizer = LossLayer<Dtype>::GetNormalizer( normalization_, outer_num_, inner_num_, valid_count); top[0]->mutable_cpu_data()[0] = loss / normalizer; if (top.size() == 2) top[1]->ShareData(prob_); #endif // USE_ROCM } else { #ifdef USE_GREENTEA //TODO: should update to align with the CUDA implememtation viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_forward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward")); viennacl::ocl::enqueue( oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, &ctx), WrapHandle(label, &ctx), WrapHandle(loss_data, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); if (this->device_->CheckCapability("cl_intel_subgroups")) { viennacl::ocl::kernel &oclk_softmax_loss_forward_asum = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward_asum")); int need_compute_count_sum = normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_; oclk_softmax_loss_forward_asum.local_work_size(0, 256); oclk_softmax_loss_forward_asum.local_work_size(1, 1); oclk_softmax_loss_forward_asum.local_work_size(2, 1); oclk_softmax_loss_forward_asum.global_work_size(0, 256); oclk_softmax_loss_forward_asum.global_work_size(1, 1); oclk_softmax_loss_forward_asum.global_work_size(2, 1); viennacl::ocl::enqueue( oclk_softmax_loss_forward_asum(nthreads, outer_num_, inner_num_, need_compute_count_sum, static_cast<int>(normalization_), WrapHandle(loss_data, &ctx), WrapHandle(counts, &ctx), WrapHandle( (cl_mem)top[0]->mutable_gpu_data(), &ctx)), ctx.get_queue()); } else { Dtype loss; greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, loss_data, 0, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); } if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_GREENTEA } } #ifdef USE_ROCM template<typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int_tp nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { const int_tp channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int_tp c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } #endif // USE_ROCM template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS) (nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); #endif // USE_ROCM } else { #ifdef USE_GREENTEA //TODO: should update to align with the CUDA implementation viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); cl_mem top_data = (cl_mem)(top[0]->gpu_data()); greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_backward")); viennacl::ocl::enqueue( oclk_softmax_loss_backward(nthreads, WrapHandle(top_data, &ctx), WrapHandle(label, &ctx), WrapHandle(bottom_diff, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); greentea_gpu_scal<Dtype>(this->device_->id(), prob_.count(), loss_weight, bottom_diff, 0); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
baa175b665b54c2d818d7a1bb2956ce68106c1f5.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void SoftmaxLossForwardGPU(const int_tp nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log( max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } #endif // USE_CUDA template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype normalizer = LossLayer<Dtype>::GetNormalizer( normalization_, outer_num_, inner_num_, valid_count); top[0]->mutable_cpu_data()[0] = loss / normalizer; if (top.size() == 2) top[1]->ShareData(prob_); #endif // USE_CUDA } else { #ifdef USE_GREENTEA //TODO: should update to align with the CUDA implememtation viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_forward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward")); viennacl::ocl::enqueue( oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, &ctx), WrapHandle(label, &ctx), WrapHandle(loss_data, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 
1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); if (this->device_->CheckCapability("cl_intel_subgroups")) { viennacl::ocl::kernel &oclk_softmax_loss_forward_asum = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward_asum")); int need_compute_count_sum = normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_; oclk_softmax_loss_forward_asum.local_work_size(0, 256); oclk_softmax_loss_forward_asum.local_work_size(1, 1); oclk_softmax_loss_forward_asum.local_work_size(2, 1); oclk_softmax_loss_forward_asum.global_work_size(0, 256); oclk_softmax_loss_forward_asum.global_work_size(1, 1); oclk_softmax_loss_forward_asum.global_work_size(2, 1); viennacl::ocl::enqueue( oclk_softmax_loss_forward_asum(nthreads, outer_num_, inner_num_, need_compute_count_sum, static_cast<int>(normalization_), WrapHandle(loss_data, &ctx), WrapHandle(counts, &ctx), WrapHandle( (cl_mem)top[0]->mutable_gpu_data(), &ctx)), ctx.get_queue()); } else { Dtype loss; greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, loss_data, 0, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); } if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_GREENTEA } } #ifdef USE_CUDA template<typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int_tp nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { const int_tp channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int_tp c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } #endif // USE_CUDA template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS) (nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); #endif // USE_CUDA } else { #ifdef USE_GREENTEA //TODO: should update to align with the CUDA implementation viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); cl_mem top_data = (cl_mem)(top[0]->gpu_data()); greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_backward")); viennacl::ocl::enqueue( oclk_softmax_loss_backward(nthreads, WrapHandle(top_data, &ctx), WrapHandle(label, &ctx), WrapHandle(bottom_diff, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); greentea_gpu_scal<Dtype>(this->device_->id(), prob_.count(), loss_weight, bottom_diff, 0); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
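Both sides of the Caffe pair above are built around the CUDA_KERNEL_LOOP / CAFFE_GET_BLOCKS combination, i.e. a grid-stride loop launched over ceil(n / threads) blocks. The stand-alone sketch below shows the pattern that macro essentially expands to; the thread count of 512 and all kernel names here are illustrative stand-ins, not Caffe's actual definitions.

#include <cstdio>
#include <cuda_runtime.h>

const int kThreads = 512;                                // stands in for CAFFE_CUDA_NUM_THREADS
inline int blocks_for(int n) { return (n + kThreads - 1) / kThreads; }   // like CAFFE_GET_BLOCKS

__global__ void scale_kernel(int n, const float *in, float alpha, float *out) {
    // Grid-stride loop: each thread handles i, i + blockDim.x * gridDim.x, ... so the launch
    // never has to match n exactly -- the same idea CUDA_KERNEL_LOOP(index, nthreads) captures.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        out[i] = alpha * in[i];
}

int main() {
    const int n = 1000000;
    float *d_in = NULL, *d_out = NULL;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    scale_kernel<<<blocks_for(n), kThreads>>>(n, d_in, 2.0f, d_out);
    printf("launch: %s\n", cudaGetErrorString(cudaDeviceSynchronize()));
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}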
1133534d335a45d628dc509d5760637d7bd85106.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> __global__ void MatMult(double *dA, double *dB, double *dC, int nRows, int nInnerDimension, int nCols, int TileSize) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = nRows * TileSize * by; int aEnd = aBegin + nRows - 1; int aStep = TileSize; int bBegin = bx * TileSize; int bStep = TileSize * nInnerDimension; double Csub = 0.0; volatile __shared__ double As[32][32]; volatile __shared__ double Bs[32][32]; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b +=bStep) { As[ty][tx] = dA[a + nRows * ty + tx]; Bs[ty][tx] = dB[b + nInnerDimension * ty + tx]; __syncthreads(); for (int k = 0; k < TileSize; ++k) { Csub += As[ty][k] * Bs[k][tx]; } __syncthreads(); //__threadfence_block(); } int c = nInnerDimension * TileSize * by + TileSize * bx; dC[c + nInnerDimension * ty + tx] = Csub; } double* read_array(const char* filename, int len) { double *x = (double*) malloc(len * sizeof(double)); FILE *fp = fopen(filename, "r"); for (int i = 0; i < len; i++) { fscanf(fp, "%lf", &x[i]); } fclose(fp); return x; } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension,int nCols, int tileSize, float* incTime ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { if(argc!=2) { printf("Usage: ./problem2 N\n"); return 0; } int nRows = 1024; int nInnerDimension = 1024; int nCols = 1024; int num_elementsA= nRows*nInnerDimension; int num_elementsB=nInnerDimension*nCols; int num_elementsC= nRows*nCols; int tileSize = atoi(argv[1]); //change this for scaling analysis float incTime=0; // Time for GPU double* hA = read_array("inputA.inp",num_elementsA); double* hB = read_array("inputB.inp",num_elementsB); double* hC = (double*) malloc(num_elementsC * sizeof(double)); // **===-------- Modify the body of this function -----------===** computeOnDevice( hA, hB,hC, nRows, nInnerDimension, nCols, tileSize, &incTime); // **===-----------------------------------------------------------===** printf("%f\n%f\n%d\n",hC[num_elementsC-1],incTime,tileSize); // cleanup memory free(hA); free(hB); free(hC); return 0; } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension, int nCols, int TileSize, float* incTime) { hipEvent_t startEvent_inc, stopEvent_inc; float elapsedTime_inc; hipEventCreate(&startEvent_inc); hipEventCreate(&stopEvent_inc); hipEventRecord(startEvent_inc,0); double* Ad; hipMalloc((void**)&Ad, nRows * nInnerDimension * sizeof(double)); hipMemcpy(Ad, hA, nRows * nInnerDimension * sizeof(double), hipMemcpyHostToDevice); double* Bd; hipMalloc((void **)&Bd, nInnerDimension * nCols * sizeof(double)); hipMemcpy(Bd, hB, nInnerDimension * nCols * sizeof(double), hipMemcpyHostToDevice); double* Cd; hipMalloc((void **)&Cd, nRows * nCols * sizeof(double)); dim3 dimBlock(TileSize, TileSize); int tempx = nRows; if (nInnerDimension > nRows) tempx = nInnerDimension; tempx = (tempx + TileSize - 1)/TileSize; int tempy = nCols; if (nInnerDimension > nCols) tempy = nInnerDimension; tempy = (tempy + TileSize - 1)/TileSize; dim3 dimGrid(tempx, tempy); hipLaunchKernelGGL(( MatMult), dim3(dimGrid), dim3(dimBlock), sizeof(double) * TileSize * 
TileSize, 0, Ad, Bd, Cd, nRows, nInnerDimension, nCols, TileSize); //, sizeof(double) * TileSize * TileSize hipMemcpy(hC, Cd, nRows * nCols * sizeof(double), hipMemcpyDeviceToHost); hipFree(Ad); hipFree(Bd); hipFree(Cd); hipEventRecord(stopEvent_inc,0); hipEventSynchronize(stopEvent_inc); hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc); *incTime = elapsedTime_inc; return; }
1133534d335a45d628dc509d5760637d7bd85106.cu
#ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> __global__ void MatMult(double *dA, double *dB, double *dC, int nRows, int nInnerDimension, int nCols, int TileSize) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int aBegin = nRows * TileSize * by; int aEnd = aBegin + nRows - 1; int aStep = TileSize; int bBegin = bx * TileSize; int bStep = TileSize * nInnerDimension; double Csub = 0.0; volatile __shared__ double As[32][32]; volatile __shared__ double Bs[32][32]; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b +=bStep) { As[ty][tx] = dA[a + nRows * ty + tx]; Bs[ty][tx] = dB[b + nInnerDimension * ty + tx]; __syncthreads(); for (int k = 0; k < TileSize; ++k) { Csub += As[ty][k] * Bs[k][tx]; } __syncthreads(); //__threadfence_block(); } int c = nInnerDimension * TileSize * by + TileSize * bx; dC[c + nInnerDimension * ty + tx] = Csub; } double* read_array(const char* filename, int len) { double *x = (double*) malloc(len * sizeof(double)); FILE *fp = fopen(filename, "r"); for (int i = 0; i < len; i++) { fscanf(fp, "%lf", &x[i]); } fclose(fp); return x; } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension,int nCols, int tileSize, float* incTime ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { if(argc!=2) { printf("Usage: ./problem2 N\n"); return 0; } int nRows = 1024; int nInnerDimension = 1024; int nCols = 1024; int num_elementsA= nRows*nInnerDimension; int num_elementsB=nInnerDimension*nCols; int num_elementsC= nRows*nCols; int tileSize = atoi(argv[1]); //change this for scaling analysis float incTime=0; // Time for GPU double* hA = read_array("inputA.inp",num_elementsA); double* hB = read_array("inputB.inp",num_elementsB); double* hC = (double*) malloc(num_elementsC * sizeof(double)); // **===-------- Modify the body of this function -----------===** computeOnDevice( hA, hB,hC, nRows, nInnerDimension, nCols, tileSize, &incTime); // **===-----------------------------------------------------------===** printf("%f\n%f\n%d\n",hC[num_elementsC-1],incTime,tileSize); // cleanup memory free(hA); free(hB); free(hC); return 0; } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension, int nCols, int TileSize, float* incTime) { cudaEvent_t startEvent_inc, stopEvent_inc; float elapsedTime_inc; cudaEventCreate(&startEvent_inc); cudaEventCreate(&stopEvent_inc); cudaEventRecord(startEvent_inc,0); double* Ad; cudaMalloc((void**)&Ad, nRows * nInnerDimension * sizeof(double)); cudaMemcpy(Ad, hA, nRows * nInnerDimension * sizeof(double), cudaMemcpyHostToDevice); double* Bd; cudaMalloc((void **)&Bd, nInnerDimension * nCols * sizeof(double)); cudaMemcpy(Bd, hB, nInnerDimension * nCols * sizeof(double), cudaMemcpyHostToDevice); double* Cd; cudaMalloc((void **)&Cd, nRows * nCols * sizeof(double)); dim3 dimBlock(TileSize, TileSize); int tempx = nRows; if (nInnerDimension > nRows) tempx = nInnerDimension; tempx = (tempx + TileSize - 1)/TileSize; int tempy = nCols; if (nInnerDimension > nCols) tempy = nInnerDimension; tempy = (tempy + TileSize - 1)/TileSize; dim3 dimGrid(tempx, tempy); MatMult<<<dimGrid, dimBlock, sizeof(double) * TileSize * TileSize>>>(Ad, Bd, Cd, nRows, nInnerDimension, nCols, TileSize); //, sizeof(double) * TileSize * TileSize 
cudaMemcpy(hC, Cd, nRows * nCols * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); cudaEventRecord(stopEvent_inc,0); cudaEventSynchronize(stopEvent_inc); cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc); *incTime = elapsedTime_inc; return; }
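One detail worth noting in the MatMult pair above: the kernel declares its tiles as fixed-size __shared__ double As[32][32], so the sizeof(double) * TileSize * TileSize passed as the third launch parameter is dynamic shared memory that those static arrays never use (the trailing comment on that launch suggests it was being experimented with). The sketch below contrasts the two declaration styles; the names and the 32/tileSize dimensions are illustrative.

#include <cuda_runtime.h>

__global__ void tiles_static(const double *A, double *B) {
    __shared__ double tile[32][32];                      // size fixed at compile time
    tile[threadIdx.y][threadIdx.x] = A[threadIdx.y * 32 + threadIdx.x];
    __syncthreads();
    B[threadIdx.x * 32 + threadIdx.y] = tile[threadIdx.y][threadIdx.x];
}

__global__ void tiles_dynamic(const double *A, double *B, int tileSize) {
    extern __shared__ double tile[];                     // sized by the launch's 3rd parameter
    tile[threadIdx.y * tileSize + threadIdx.x] = A[threadIdx.y * tileSize + threadIdx.x];
    __syncthreads();
    B[threadIdx.x * tileSize + threadIdx.y] = tile[threadIdx.y * tileSize + threadIdx.x];
}

// tiles_static <<<1, dim3(32, 32)>>>(dA, dB);                          // 3rd parameter not needed
// tiles_dynamic<<<1, dim3(T, T), T * T * sizeof(double)>>>(dA, dB, T); // 3rd parameter required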
0821f0f09e185071672f615ac3f9abffc8c06011.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while(0) #define Mask_width 5 #define Mask_radius Mask_width/2 #define Tile_width 12 #define Block_width Tile_width + Mask_width-1 #define Max_channels 3 //@@ INSERT CODE HERE __global__ void imgConv(float *imgIn, const float * __restrict__ mask, float *imgOut, int rows, int cols, int channels, int step) { __shared__ float ds_roi [Max_channels][Block_width][Block_width]; int row = blockIdx.y*Tile_width + threadIdx.y; int col = blockIdx.z*Tile_width + threadIdx.z; int row_i = row - (int)Mask_radius; int col_i = col - (int)Mask_radius; if(row_i >= 0 && col_i>= 0 && row_i < rows && col_i < cols) ds_roi[threadIdx.x][threadIdx.y][threadIdx.z] = imgIn[(row_i*cols+col_i)*channels + threadIdx.x]; else ds_roi[threadIdx.x][threadIdx.y][threadIdx.z] = 0.0; __syncthreads(); if(threadIdx.y < Tile_width && threadIdx.z< Tile_width) { float val = 0.0; for(int r = 0; r<Mask_width; ++r) { for(int c=0; c<Mask_width; ++c) { val += mask[r*Mask_width + c] * ds_roi[threadIdx.x][threadIdx.y+r][threadIdx.z+c]; } } if(row < rows && col < cols) imgOut[(row*cols+col)*channels + threadIdx.x] = val; } } int main(int argc, char* argv[]) { wbArg_t args; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char * inputImageFile; char * inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; float * hostMaskData; float * deviceInputImageData; float * deviceOutputImageData; float * deviceMaskData; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); inputMaskFile = wbArg_getInputFile(args, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), hipMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE dim3 dimGrid (1,(imageHeight-1)/Tile_width+1,(imageWidth-1)/Tile_width+1); dim3 dimBlock(imageChannels, Block_width, Block_width); hipLaunchKernelGGL(( imgConv), dim3(dimGrid),dim3(dimBlock), 0, 0, 
deviceInputImageData, deviceMaskData, deviceOutputImageData, imageHeight, imageWidth, imageChannels, imageWidth*imageChannels); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(args, outputImage); hipFree(deviceInputImageData); hipFree(deviceOutputImageData); hipFree(deviceMaskData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
0821f0f09e185071672f615ac3f9abffc8c06011.cu
#include <wb.h> #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while(0) #define Mask_width 5 #define Mask_radius Mask_width/2 #define Tile_width 12 #define Block_width Tile_width + Mask_width-1 #define Max_channels 3 //@@ INSERT CODE HERE __global__ void imgConv(float *imgIn, const float * __restrict__ mask, float *imgOut, int rows, int cols, int channels, int step) { __shared__ float ds_roi [Max_channels][Block_width][Block_width]; int row = blockIdx.y*Tile_width + threadIdx.y; int col = blockIdx.z*Tile_width + threadIdx.z; int row_i = row - (int)Mask_radius; int col_i = col - (int)Mask_radius; if(row_i >= 0 && col_i>= 0 && row_i < rows && col_i < cols) ds_roi[threadIdx.x][threadIdx.y][threadIdx.z] = imgIn[(row_i*cols+col_i)*channels + threadIdx.x]; else ds_roi[threadIdx.x][threadIdx.y][threadIdx.z] = 0.0; __syncthreads(); if(threadIdx.y < Tile_width && threadIdx.z< Tile_width) { float val = 0.0; for(int r = 0; r<Mask_width; ++r) { for(int c=0; c<Mask_width; ++c) { val += mask[r*Mask_width + c] * ds_roi[threadIdx.x][threadIdx.y+r][threadIdx.z+c]; } } if(row < rows && col < cols) imgOut[(row*cols+col)*channels + threadIdx.x] = val; } } int main(int argc, char* argv[]) { wbArg_t args; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char * inputImageFile; char * inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; float * hostMaskData; float * deviceInputImageData; float * deviceOutputImageData; float * deviceMaskData; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); inputMaskFile = wbArg_getInputFile(args, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), cudaMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE dim3 dimGrid (1,(imageHeight-1)/Tile_width+1,(imageWidth-1)/Tile_width+1); dim3 dimBlock(imageChannels, Block_width, Block_width); imgConv<<<dimGrid,dimBlock>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData, imageHeight, imageWidth, imageChannels, 
imageWidth*imageChannels); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(args, outputImage); cudaFree(deviceInputImageData); cudaFree(deviceOutputImageData); cudaFree(deviceMaskData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
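The convolution pair above uses the standard output-tile/halo decomposition: each block runs Block_width = Tile_width + Mask_width - 1 threads per side, every thread loads one shared-memory element (zero-padding outside the image), and only the inner Tile_width x Tile_width threads produce output pixels. A single-channel sketch of that index bookkeeping follows; the three-channel handling and the wb* harness from the sample are omitted, and the constants are illustrative.

#include <cuda_runtime.h>

#define MASK  5
#define TILE  12
#define BLOCK (TILE + MASK - 1)                          // 16: tile plus the halo on each side

__global__ void conv2d_demo(const float *in, const float *mask, float *out, int rows, int cols) {
    __shared__ float patch[BLOCK][BLOCK];
    int out_row = blockIdx.y * TILE + threadIdx.y;       // output pixel this thread may own
    int out_col = blockIdx.x * TILE + threadIdx.x;
    int in_row  = out_row - MASK / 2;                    // shifted back by the mask radius
    int in_col  = out_col - MASK / 2;
    patch[threadIdx.y][threadIdx.x] =
        (in_row >= 0 && in_col >= 0 && in_row < rows && in_col < cols)
            ? in[in_row * cols + in_col] : 0.0f;         // zero-pad outside the image
    __syncthreads();
    if (threadIdx.y < TILE && threadIdx.x < TILE && out_row < rows && out_col < cols) {
        float v = 0.0f;
        for (int r = 0; r < MASK; ++r)
            for (int c = 0; c < MASK; ++c)
                v += mask[r * MASK + c] * patch[threadIdx.y + r][threadIdx.x + c];
        out[out_row * cols + out_col] = v;
    }
}

// conv2d_demo<<<dim3((cols + TILE - 1) / TILE, (rows + TILE - 1) / TILE), dim3(BLOCK, BLOCK)>>>(d_in, d_mask, d_out, rows, cols);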
80b90cee1bedb30eba39e3119ccb11487c14b785.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cumo/narray_kernel.h"
#include "cumo/indexer.h"

#if defined(__cplusplus)
extern "C" {
#if 0
} /* satisfy cc-mode */
#endif
#endif

#define CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(NDIM) \
__global__ void cumo_ndloop_copy_from_buffer_kernel_dim##NDIM( \
        cumo_na_iarray_stridx_t a, cumo_na_indexer_t indexer, char *buf, size_t elmsz) { \
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indexer.total_size; i += blockDim.x * gridDim.x) { \
        cumo_na_indexer_set_dim##NDIM(&indexer, i); \
        char* p = cumo_na_iarray_stridx_at_dim##NDIM(&a, &indexer); \
        memcpy(p, buf + i * elmsz, elmsz); \
    } \
}

#define CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(NDIM) \
__global__ void cumo_ndloop_copy_to_buffer_kernel_dim##NDIM( \
        cumo_na_iarray_stridx_t a, cumo_na_indexer_t indexer, char *buf, size_t elmsz) { \
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indexer.total_size; i += blockDim.x * gridDim.x) { \
        cumo_na_indexer_set_dim##NDIM(&indexer, i); \
        char* p = cumo_na_iarray_stridx_at_dim##NDIM(&a, &indexer); \
        memcpy(buf + i * elmsz, p, elmsz); \
    } \
}

CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(1)
CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(2)
CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(3)
CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(4)
CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL()

CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(1)
CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(2)
CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(3)
CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(4)
CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL()

#undef CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL
#undef CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL

void cumo_ndloop_copy_from_buffer_kernel_launch(cumo_na_iarray_stridx_t *a, cumo_na_indexer_t* indexer, char *buf, size_t elmsz)
{
    size_t grid_dim = cumo_get_grid_dim(indexer->total_size);
    size_t block_dim = cumo_get_block_dim(indexer->total_size);
    switch (indexer->ndim) {
    case 1:
        hipLaunchKernelGGL(( cumo_ndloop_copy_from_buffer_kernel_dim1), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    case 2:
        hipLaunchKernelGGL(( cumo_ndloop_copy_from_buffer_kernel_dim2), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    case 3:
        hipLaunchKernelGGL(( cumo_ndloop_copy_from_buffer_kernel_dim3), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    case 4:
        hipLaunchKernelGGL(( cumo_ndloop_copy_from_buffer_kernel_dim4), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    default:
        hipLaunchKernelGGL(( cumo_ndloop_copy_from_buffer_kernel_dim), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    }
}

void cumo_ndloop_copy_to_buffer_kernel_launch(cumo_na_iarray_stridx_t *a, cumo_na_indexer_t* indexer, char *buf, size_t elmsz)
{
    size_t grid_dim = cumo_get_grid_dim(indexer->total_size);
    size_t block_dim = cumo_get_block_dim(indexer->total_size);
    switch (indexer->ndim) {
    case 1:
        hipLaunchKernelGGL(( cumo_ndloop_copy_to_buffer_kernel_dim1), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    case 2:
        hipLaunchKernelGGL(( cumo_ndloop_copy_to_buffer_kernel_dim2), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    case 3:
        hipLaunchKernelGGL(( cumo_ndloop_copy_to_buffer_kernel_dim3), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    case 4:
        hipLaunchKernelGGL(( cumo_ndloop_copy_to_buffer_kernel_dim4), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    default:
        hipLaunchKernelGGL(( cumo_ndloop_copy_to_buffer_kernel_dim), dim3(grid_dim), dim3(block_dim), 0, 0, *a,*indexer,buf,elmsz);
        break;
    }
}

#if defined(__cplusplus)
#if 0
{ /* satisfy cc-mode */
#endif
} /* extern "C" { */
#endif
80b90cee1bedb30eba39e3119ccb11487c14b785.cu
#include "cumo/narray_kernel.h" #include "cumo/indexer.h" #if defined(__cplusplus) extern "C" { #if 0 } /* satisfy cc-mode */ #endif #endif #define CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(NDIM) \ __global__ void cumo_ndloop_copy_from_buffer_kernel_dim##NDIM( \ cumo_na_iarray_stridx_t a, cumo_na_indexer_t indexer, char *buf, size_t elmsz) { \ for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indexer.total_size; i += blockDim.x * gridDim.x) { \ cumo_na_indexer_set_dim##NDIM(&indexer, i); \ char* p = cumo_na_iarray_stridx_at_dim##NDIM(&a, &indexer); \ memcpy(p, buf + i * elmsz, elmsz); \ } \ } #define CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(NDIM) \ __global__ void cumo_ndloop_copy_to_buffer_kernel_dim##NDIM( \ cumo_na_iarray_stridx_t a, cumo_na_indexer_t indexer, char *buf, size_t elmsz) { \ for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < indexer.total_size; i += blockDim.x * gridDim.x) { \ cumo_na_indexer_set_dim##NDIM(&indexer, i); \ char* p = cumo_na_iarray_stridx_at_dim##NDIM(&a, &indexer); \ memcpy(buf + i * elmsz, p, elmsz); \ } \ } CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(1) CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(2) CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(3) CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL(4) CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL() CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(1) CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(2) CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(3) CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL(4) CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL() #undef CUMO_NDLOOP_COPY_FROM_BUFFER_KERNEL #undef CUMO_NDLOOP_COPY_TO_BUFFER_KERNEL void cumo_ndloop_copy_from_buffer_kernel_launch(cumo_na_iarray_stridx_t *a, cumo_na_indexer_t* indexer, char *buf, size_t elmsz) { size_t grid_dim = cumo_get_grid_dim(indexer->total_size); size_t block_dim = cumo_get_block_dim(indexer->total_size); switch (indexer->ndim) { case 1: cumo_ndloop_copy_from_buffer_kernel_dim1<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; case 2: cumo_ndloop_copy_from_buffer_kernel_dim2<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; case 3: cumo_ndloop_copy_from_buffer_kernel_dim3<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; case 4: cumo_ndloop_copy_from_buffer_kernel_dim4<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; default: cumo_ndloop_copy_from_buffer_kernel_dim<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; } } void cumo_ndloop_copy_to_buffer_kernel_launch(cumo_na_iarray_stridx_t *a, cumo_na_indexer_t* indexer, char *buf, size_t elmsz) { size_t grid_dim = cumo_get_grid_dim(indexer->total_size); size_t block_dim = cumo_get_block_dim(indexer->total_size); switch (indexer->ndim) { case 1: cumo_ndloop_copy_to_buffer_kernel_dim1<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; case 2: cumo_ndloop_copy_to_buffer_kernel_dim2<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; case 3: cumo_ndloop_copy_to_buffer_kernel_dim3<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; case 4: cumo_ndloop_copy_to_buffer_kernel_dim4<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; default: cumo_ndloop_copy_to_buffer_kernel_dim<<<grid_dim, block_dim>>>(*a,*indexer,buf,elmsz); break; } } #if defined(__cplusplus) #if 0 { /* satisfy cc-mode */ #endif } /* extern "C" { */ #endif
a85629df697c73efcee2d5598ee7ed73996cf964.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "thrust/device_vector.h"

#include "layer.hpp"
#include "math_functions.hpp"
#include "moving_normalize_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
    const int spatial_dim, Dtype epsilon, const Dtype* data, Dtype* norm_data) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype sum = 0;
    for (int c = 0; c < channels; ++c) {
      sum += data[(n * channels + c) * spatial_dim + s];
    }
    norm_data[index] = sum + epsilon;
  }
}

template <typename Dtype>
void MovingNormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* square_data = squared_.mutable_gpu_data();
  Dtype* norm_data = norm_.mutable_gpu_data();
  Dtype* moving_average_norm = this->blobs_[0]->mutable_cpu_data();
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int spatial_dim = bottom[0]->height() * bottom[0]->width();
  int count = bottom[0]->count();
  Dtype mean_norm = Dtype(0);

  caffe_gpu_powx(num*channels*spatial_dim, bottom_data, Dtype(2), square_data);
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim), CAFFE_CUDA_NUM_THREADS >> > (
      num, channels, spatial_dim, 1e-12, square_data, norm_data);
  caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(0.5), norm_data);
  caffe_gpu_dot(num, norm_.gpu_data(), sum_multiplier_.gpu_data(), &mean_norm);
  mean_norm /= num * spatial_dim;
  if (moving_average_norm[0] < 0) {
    moving_average_norm[0] = mean_norm;
  }
  else {
    moving_average_norm[0] = moving_average_norm[0] * 0.99 + mean_norm * 0.01;
  }
  if (top.size() == 2) {
    top[1]->mutable_cpu_data()[0] = moving_average_norm[0];
  }
  caffe_gpu_scale(count, Dtype(1) / moving_average_norm[0], bottom_data, top_data);
}

INSTANTIATE_LAYER_GPU_FUNCS(MovingNormalizeLayer);

}  // namespace caffe
a85629df697c73efcee2d5598ee7ed73996cf964.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "thrust/device_vector.h"

#include "layer.hpp"
#include "math_functions.hpp"
#include "moving_normalize_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
    const int spatial_dim, Dtype epsilon, const Dtype* data, Dtype* norm_data) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype sum = 0;
    for (int c = 0; c < channels; ++c) {
      sum += data[(n * channels + c) * spatial_dim + s];
    }
    norm_data[index] = sum + epsilon;
  }
}

template <typename Dtype>
void MovingNormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* square_data = squared_.mutable_gpu_data();
  Dtype* norm_data = norm_.mutable_gpu_data();
  Dtype* moving_average_norm = this->blobs_[0]->mutable_cpu_data();
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int spatial_dim = bottom[0]->height() * bottom[0]->width();
  int count = bottom[0]->count();
  Dtype mean_norm = Dtype(0);

  caffe_gpu_powx(num*channels*spatial_dim, bottom_data, Dtype(2), square_data);
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim), CAFFE_CUDA_NUM_THREADS >> > (
      num, channels, spatial_dim, 1e-12, square_data, norm_data);
  caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(0.5), norm_data);
  caffe_gpu_dot(num, norm_.gpu_data(), sum_multiplier_.gpu_data(), &mean_norm);
  mean_norm /= num * spatial_dim;
  if (moving_average_norm[0] < 0) {
    moving_average_norm[0] = mean_norm;
  }
  else {
    moving_average_norm[0] = moving_average_norm[0] * 0.99 + mean_norm * 0.01;
  }
  if (top.size() == 2) {
    top[1]->mutable_cpu_data()[0] = moving_average_norm[0];
  }
  caffe_gpu_scale(count, Dtype(1) / moving_average_norm[0], bottom_data, top_data);
}

INSTANTIATE_LAYER_GPU_FUNCS(MovingNormalizeLayer);

}  // namespace caffe
98f603afdbdd8e38a0e4a9cde91eaba2ef92223c.hip
// !!! This is a file automatically generated by hipify!!! #include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <dslash_quda.h> #include <index_helper.cuh> #include <dslash_quda.h> #include <include/kernels/dslash_domain_wall_m5.cuh> namespace quda { /* FIXME - fix flops counters - check dagger operators are correct - there might need to be a shift by 1 in which coefficients are used and conjugation of coefficients - use kappa notation and not b/c for consistency with other codes and sanity */ template <typename Float, int nColor, typename Arg> class Dslash5 : public TunableVectorYZ { protected: Arg &arg; const ColorSpinorField &meta; static constexpr bool shared = true; // whether to use shared memory cache blocking for M5inv /** Whether to use variable or fixed coefficient algorithm. Must be true if using ZMOBIUS */ static constexpr bool var_inverse = true; long long flops() const { long long Ls = meta.X(4); long long bulk = (Ls - 2) * (meta.Volume() / Ls); long long wall = 2 * meta.Volume() / Ls; long long n = meta.Ncolor() * meta.Nspin(); long long flops_ = 0; switch (arg.type) { case DSLASH5_DWF: flops_ = n * (8ll * bulk + 10ll * wall + (arg.xpay ? 4ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS_PRE: flops_ = n * (8ll * bulk + 10ll * wall + 14ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS: flops_ = n * (8ll * bulk + 10ll * wall + 8ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case M5_INV_DWF: case M5_INV_MOBIUS: // FIXME flops // flops_ = ((2 + 8 * n) * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); break; case M5_INV_ZMOBIUS: // flops_ = ((12 + 16 * n) * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } return flops_; } long long bytes() const { long long Ls = meta.X(4); switch (arg.type) { case DSLASH5_DWF: return arg.out.Bytes() + 2 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS_PRE: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_DWF: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_MOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_ZMOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); default: errorQuda("Unknown Dslash5Type %d", arg.type); } return 0ll; } bool tuneGridDim() const { return false; } unsigned int minThreads() const { return arg.volume_4d_cb; } int blockStep() const { return 4; } int blockMin() const { return 4; } unsigned int sharedBytesPerThread() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // spin components in shared depend on inversion algorithm int nSpin = var_inverse ? 
meta.Nspin() / 2 : meta.Nspin(); return 2 * nSpin * nColor * sizeof(typename mapper<Float>::type); } else { return 0; } } // overloaded to return max dynamic shared memory if doing shared-memory inverse unsigned int maxSharedBytesPerBlock() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { return maxDynamicSharedBytesPerBlock(); } else { return TunableVectorYZ::maxSharedBytesPerBlock(); } } public: Dslash5(Arg &arg, const ColorSpinorField &meta) : TunableVectorYZ(arg.Ls, arg.nParity), arg(arg), meta(meta) { strcpy(aux, meta.AuxString()); if (arg.dagger) strcat(aux, ",Dagger"); if (arg.xpay) strcat(aux, ",xpay"); switch (arg.type) { case DSLASH5_DWF: strcat(aux, ",DSLASH5_DWF"); break; case DSLASH5_MOBIUS_PRE: strcat(aux, ",DSLASH5_MOBIUS_PRE"); break; case DSLASH5_MOBIUS: strcat(aux, ",DSLASH5_MOBIUS"); break; case M5_INV_DWF: strcat(aux, ",M5_INV_DWF"); break; case M5_INV_MOBIUS: strcat(aux, ",M5_INV_MOBIUS"); break; case M5_INV_ZMOBIUS: strcat(aux, ",M5_INV_ZMOBIUS"); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } } virtual ~Dslash5() {} template <typename T> inline void launch(T *f, const TuneParam &tp, Arg &arg, const hipStream_t &stream) { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // if inverse kernel uses shared memory then maximize total shared memory pool setMaxDynamicSharedBytesPerBlock(f); } void *args[] = {&arg}; qudaLaunchKernel((const void *)f, tp.grid, tp.block, args, tp.shared_bytes, stream); } void apply(const hipStream_t &stream) { if (meta.Location() == QUDA_CPU_FIELD_LOCATION) { constexpr bool shared = false; // CPU does not have shared memory if (arg.type == DSLASH5_DWF) { if (arg.xpay) arg.dagger ? dslash5CPU<Float, nColor, true, true, DSLASH5_DWF>(arg) : dslash5CPU<Float, nColor, false, true, DSLASH5_DWF>(arg); else arg.dagger ? dslash5CPU<Float, nColor, true, false, DSLASH5_DWF>(arg) : dslash5CPU<Float, nColor, false, false, DSLASH5_DWF>(arg); } else if (arg.type == DSLASH5_MOBIUS_PRE) { if (arg.xpay) arg.dagger ? dslash5CPU<Float, nColor, true, true, DSLASH5_MOBIUS_PRE>(arg) : dslash5CPU<Float, nColor, false, true, DSLASH5_MOBIUS_PRE>(arg); else arg.dagger ? dslash5CPU<Float, nColor, true, false, DSLASH5_MOBIUS_PRE>(arg) : dslash5CPU<Float, nColor, false, false, DSLASH5_MOBIUS_PRE>(arg); } else if (arg.type == DSLASH5_MOBIUS) { if (arg.xpay) arg.dagger ? dslash5CPU<Float, nColor, true, true, DSLASH5_MOBIUS>(arg) : dslash5CPU<Float, nColor, false, true, DSLASH5_MOBIUS>(arg); else arg.dagger ? dslash5CPU<Float, nColor, true, false, DSLASH5_MOBIUS>(arg) : dslash5CPU<Float, nColor, false, false, DSLASH5_MOBIUS>(arg); } else if (arg.type == M5_INV_DWF) { if (arg.xpay) arg.dagger ? dslash5invCPU<Float, nColor, true, true, M5_INV_DWF, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, true, M5_INV_DWF, shared, var_inverse>(arg); else arg.dagger ? dslash5invCPU<Float, nColor, true, false, M5_INV_DWF, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, false, M5_INV_DWF, shared, var_inverse>(arg); } else if (arg.type == M5_INV_MOBIUS) { if (arg.xpay) arg.dagger ? dslash5invCPU<Float, nColor, true, true, M5_INV_MOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, true, M5_INV_MOBIUS, shared, var_inverse>(arg); else arg.dagger ? 
dslash5invCPU<Float, nColor, true, false, M5_INV_MOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, false, M5_INV_MOBIUS, shared, var_inverse>(arg); } else if (arg.type == M5_INV_ZMOBIUS) { if (arg.xpay) arg.dagger ? dslash5invCPU<Float, nColor, true, true, M5_INV_ZMOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, true, M5_INV_ZMOBIUS, shared, var_inverse>(arg); else arg.dagger ? dslash5invCPU<Float, nColor, true, false, M5_INV_ZMOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, false, M5_INV_ZMOBIUS, shared, var_inverse>(arg); } } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (arg.type == DSLASH5_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_DWF, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_DWF, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS_PRE) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5invGPU<Float, nColor, true, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5invGPU<Float, nColor, true, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_MOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch( dslash5invGPU<Float, nColor, true, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_ZMOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? 
launch( dslash5invGPU<Float, nColor, true, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } } } void initTuneParam(TuneParam &param) const { TunableVectorYZ::initTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } void defaultTuneParam(TuneParam &param) const { TunableVectorYZ::defaultTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template <typename Float, int nColor> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { Dslash5Arg<Float, nColor> arg(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); Dslash5<Float, nColor, Dslash5Arg<Float, nColor>> dslash(arg, in); dslash.apply(streams[Nstream - 1]); } // template on the number of colors template <typename Float> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { switch (in.Ncolor()) { case 3: ApplyDslash5<Float, 3>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported number of colors %d\n", in.Ncolor()); } } // Apply the 5th dimension dslash operator to a colorspinor field // out = Dslash5*in void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { #ifdef GPU_DOMAIN_WALL_DIRAC if (in.PCType() != QUDA_4D_PC) errorQuda("Only 4-d preconditioned fields are supported"); checkLocation(out, in); // check all locations match switch (checkPrecision(out, in)) { case QUDA_DOUBLE_PRECISION: ApplyDslash5<double>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_SINGLE_PRECISION: ApplyDslash5<float>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_HALF_PRECISION: ApplyDslash5<short>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_QUARTER_PRECISION: ApplyDslash5<char>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported precision %d\n", in.Precision()); } #else errorQuda("Domain wall dslash has not been built"); #endif } } // namespace quda
98f603afdbdd8e38a0e4a9cde91eaba2ef92223c.cu
#include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <dslash_quda.h> #include <index_helper.cuh> #include <dslash_quda.h> #include <include/kernels/dslash_domain_wall_m5.cuh> namespace quda { /* FIXME - fix flops counters - check dagger operators are correct - there might need to be a shift by 1 in which coefficients are used and conjugation of coefficients - use kappa notation and not b/c for consistency with other codes and sanity */ template <typename Float, int nColor, typename Arg> class Dslash5 : public TunableVectorYZ { protected: Arg &arg; const ColorSpinorField &meta; static constexpr bool shared = true; // whether to use shared memory cache blocking for M5inv /** Whether to use variable or fixed coefficient algorithm. Must be true if using ZMOBIUS */ static constexpr bool var_inverse = true; long long flops() const { long long Ls = meta.X(4); long long bulk = (Ls - 2) * (meta.Volume() / Ls); long long wall = 2 * meta.Volume() / Ls; long long n = meta.Ncolor() * meta.Nspin(); long long flops_ = 0; switch (arg.type) { case DSLASH5_DWF: flops_ = n * (8ll * bulk + 10ll * wall + (arg.xpay ? 4ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS_PRE: flops_ = n * (8ll * bulk + 10ll * wall + 14ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case DSLASH5_MOBIUS: flops_ = n * (8ll * bulk + 10ll * wall + 8ll * meta.Volume() + (arg.xpay ? 8ll * meta.Volume() : 0)); break; case M5_INV_DWF: case M5_INV_MOBIUS: // FIXME flops // flops_ = ((2 + 8 * n) * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 4ll : 0)) * meta.Volume(); break; case M5_INV_ZMOBIUS: // flops_ = ((12 + 16 * n) * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); flops_ = (144 * Ls + (arg.xpay ? 8ll : 0)) * meta.Volume(); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } return flops_; } long long bytes() const { long long Ls = meta.X(4); switch (arg.type) { case DSLASH5_DWF: return arg.out.Bytes() + 2 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS_PRE: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case DSLASH5_MOBIUS: return arg.out.Bytes() + 3 * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_DWF: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_MOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); case M5_INV_ZMOBIUS: return arg.out.Bytes() + Ls * arg.in.Bytes() + (arg.xpay ? arg.x.Bytes() : 0); default: errorQuda("Unknown Dslash5Type %d", arg.type); } return 0ll; } bool tuneGridDim() const { return false; } unsigned int minThreads() const { return arg.volume_4d_cb; } int blockStep() const { return 4; } int blockMin() const { return 4; } unsigned int sharedBytesPerThread() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // spin components in shared depend on inversion algorithm int nSpin = var_inverse ? 
meta.Nspin() / 2 : meta.Nspin(); return 2 * nSpin * nColor * sizeof(typename mapper<Float>::type); } else { return 0; } } // overloaded to return max dynamic shared memory if doing shared-memory inverse unsigned int maxSharedBytesPerBlock() const { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { return maxDynamicSharedBytesPerBlock(); } else { return TunableVectorYZ::maxSharedBytesPerBlock(); } } public: Dslash5(Arg &arg, const ColorSpinorField &meta) : TunableVectorYZ(arg.Ls, arg.nParity), arg(arg), meta(meta) { strcpy(aux, meta.AuxString()); if (arg.dagger) strcat(aux, ",Dagger"); if (arg.xpay) strcat(aux, ",xpay"); switch (arg.type) { case DSLASH5_DWF: strcat(aux, ",DSLASH5_DWF"); break; case DSLASH5_MOBIUS_PRE: strcat(aux, ",DSLASH5_MOBIUS_PRE"); break; case DSLASH5_MOBIUS: strcat(aux, ",DSLASH5_MOBIUS"); break; case M5_INV_DWF: strcat(aux, ",M5_INV_DWF"); break; case M5_INV_MOBIUS: strcat(aux, ",M5_INV_MOBIUS"); break; case M5_INV_ZMOBIUS: strcat(aux, ",M5_INV_ZMOBIUS"); break; default: errorQuda("Unknown Dslash5Type %d", arg.type); } } virtual ~Dslash5() {} template <typename T> inline void launch(T *f, const TuneParam &tp, Arg &arg, const cudaStream_t &stream) { if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { // if inverse kernel uses shared memory then maximize total shared memory pool setMaxDynamicSharedBytesPerBlock(f); } void *args[] = {&arg}; qudaLaunchKernel((const void *)f, tp.grid, tp.block, args, tp.shared_bytes, stream); } void apply(const cudaStream_t &stream) { if (meta.Location() == QUDA_CPU_FIELD_LOCATION) { constexpr bool shared = false; // CPU does not have shared memory if (arg.type == DSLASH5_DWF) { if (arg.xpay) arg.dagger ? dslash5CPU<Float, nColor, true, true, DSLASH5_DWF>(arg) : dslash5CPU<Float, nColor, false, true, DSLASH5_DWF>(arg); else arg.dagger ? dslash5CPU<Float, nColor, true, false, DSLASH5_DWF>(arg) : dslash5CPU<Float, nColor, false, false, DSLASH5_DWF>(arg); } else if (arg.type == DSLASH5_MOBIUS_PRE) { if (arg.xpay) arg.dagger ? dslash5CPU<Float, nColor, true, true, DSLASH5_MOBIUS_PRE>(arg) : dslash5CPU<Float, nColor, false, true, DSLASH5_MOBIUS_PRE>(arg); else arg.dagger ? dslash5CPU<Float, nColor, true, false, DSLASH5_MOBIUS_PRE>(arg) : dslash5CPU<Float, nColor, false, false, DSLASH5_MOBIUS_PRE>(arg); } else if (arg.type == DSLASH5_MOBIUS) { if (arg.xpay) arg.dagger ? dslash5CPU<Float, nColor, true, true, DSLASH5_MOBIUS>(arg) : dslash5CPU<Float, nColor, false, true, DSLASH5_MOBIUS>(arg); else arg.dagger ? dslash5CPU<Float, nColor, true, false, DSLASH5_MOBIUS>(arg) : dslash5CPU<Float, nColor, false, false, DSLASH5_MOBIUS>(arg); } else if (arg.type == M5_INV_DWF) { if (arg.xpay) arg.dagger ? dslash5invCPU<Float, nColor, true, true, M5_INV_DWF, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, true, M5_INV_DWF, shared, var_inverse>(arg); else arg.dagger ? dslash5invCPU<Float, nColor, true, false, M5_INV_DWF, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, false, M5_INV_DWF, shared, var_inverse>(arg); } else if (arg.type == M5_INV_MOBIUS) { if (arg.xpay) arg.dagger ? dslash5invCPU<Float, nColor, true, true, M5_INV_MOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, true, M5_INV_MOBIUS, shared, var_inverse>(arg); else arg.dagger ? 
dslash5invCPU<Float, nColor, true, false, M5_INV_MOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, false, M5_INV_MOBIUS, shared, var_inverse>(arg); } else if (arg.type == M5_INV_ZMOBIUS) { if (arg.xpay) arg.dagger ? dslash5invCPU<Float, nColor, true, true, M5_INV_ZMOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, true, M5_INV_ZMOBIUS, shared, var_inverse>(arg); else arg.dagger ? dslash5invCPU<Float, nColor, true, false, M5_INV_ZMOBIUS, shared, var_inverse>(arg) : dslash5invCPU<Float, nColor, false, false, M5_INV_ZMOBIUS, shared, var_inverse>(arg); } } else { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (arg.type == DSLASH5_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_DWF, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_DWF, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_DWF, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS_PRE) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS_PRE, Arg>, tp, arg, stream); } else if (arg.type == DSLASH5_MOBIUS) { if (arg.xpay) arg.dagger ? launch(dslash5GPU<Float, nColor, true, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, true, DSLASH5_MOBIUS, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5GPU<Float, nColor, true, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream) : launch(dslash5GPU<Float, nColor, false, false, DSLASH5_MOBIUS, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_DWF) { if (arg.xpay) arg.dagger ? launch(dslash5invGPU<Float, nColor, true, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch(dslash5invGPU<Float, nColor, true, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_DWF, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_MOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? launch( dslash5invGPU<Float, nColor, true, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_MOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } else if (arg.type == M5_INV_ZMOBIUS) { if (arg.xpay) arg.dagger ? launch( dslash5invGPU<Float, nColor, true, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, true, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); else arg.dagger ? 
launch( dslash5invGPU<Float, nColor, true, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream) : launch(dslash5invGPU<Float, nColor, false, false, M5_INV_ZMOBIUS, shared, var_inverse, Arg>, tp, arg, stream); } } } void initTuneParam(TuneParam &param) const { TunableVectorYZ::initTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } void defaultTuneParam(TuneParam &param) const { TunableVectorYZ::defaultTuneParam(param); if (shared && (arg.type == M5_INV_DWF || arg.type == M5_INV_MOBIUS || arg.type == M5_INV_ZMOBIUS)) { param.block.y = arg.Ls; // Ls must be contained in the block param.grid.y = 1; param.shared_bytes = sharedBytesPerThread() * param.block.x * param.block.y * param.block.z; } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } }; template <typename Float, int nColor> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { Dslash5Arg<Float, nColor> arg(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); Dslash5<Float, nColor, Dslash5Arg<Float, nColor>> dslash(arg, in); dslash.apply(streams[Nstream - 1]); } // template on the number of colors template <typename Float> void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { switch (in.Ncolor()) { case 3: ApplyDslash5<Float, 3>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported number of colors %d\n", in.Ncolor()); } } // Apply the 5th dimension dslash operator to a colorspinor field // out = Dslash5*in void ApplyDslash5(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &x, double m_f, double m_5, const Complex *b_5, const Complex *c_5, double a, bool dagger, Dslash5Type type) { #ifdef GPU_DOMAIN_WALL_DIRAC if (in.PCType() != QUDA_4D_PC) errorQuda("Only 4-d preconditioned fields are supported"); checkLocation(out, in); // check all locations match switch (checkPrecision(out, in)) { case QUDA_DOUBLE_PRECISION: ApplyDslash5<double>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_SINGLE_PRECISION: ApplyDslash5<float>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_HALF_PRECISION: ApplyDslash5<short>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; case QUDA_QUARTER_PRECISION: ApplyDslash5<char>(out, in, x, m_f, m_5, b_5, c_5, a, dagger, type); break; default: errorQuda("Unsupported precision %d\n", in.Precision()); } #else errorQuda("Domain wall dslash has not been built"); #endif } } // namespace quda
fb0bc6cda3121b971044f529a5a17d48971dfd86.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void setup_kernel (hiprandState_t * state, unsigned long seed )
{
    int i= blockDim.x * blockIdx.x + threadIdx.x;
    hiprand_init (seed, i, 0, &state[i]);
}
fb0bc6cda3121b971044f529a5a17d48971dfd86.cu
#include "includes.h" __global__ void setup_kernel (curandState * state, unsigned long seed ) { int i= blockDim.x * blockIdx.x + threadIdx.x; curand_init (seed, i, 0, &state[i]); }
2d09aa2946b2225e85d38f2b6b343071ff3c8bb0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Hello Cuda World Program //

/*
 * Author: Malhar Bhatt
 * Subject : High Performance Computing
 *
 */

#include <iostream>

/**
 * Empty Function named Kernel() qualified with __global__
 *
 */
__global__ void kernel (void)
{
}

int main(void)
{
    hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );  // Calling Empty Function

    printf("Hello Cuda World !!!\n");  // Printing Hello Cuda World

    system("pause");
    return 0;
}
2d09aa2946b2225e85d38f2b6b343071ff3c8bb0.cu
// Hello Cuda World Program //

/*
 * Author: Malhar Bhatt
 * Subject : High Performance Computing
 *
 */

#include <iostream>

/**
 * Empty Function named Kernel() qualified with __global__
 *
 */
__global__ void kernel (void)
{
}

int main(void)
{
    kernel<<<1,1>>>();  // Calling Empty Function

    printf("Hello Cuda World !!!\n");  // Printing Hello Cuda World

    system("pause");
    return 0;
}
adbf5243bf8c2e5180cb4f01f078c7972ed6079f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int WARP_SIZE = 64; static const int BLOCKDIMY = 16; #else static const int WARP_SIZE = 32; static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t> __global__ void embedding_backward_feature_kernel (int64_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t> __global__ void embedding_backward_kernel( int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? 
(accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t> __global__ void renorm_kernel( scalar_t* weights, int64_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += ::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarType("embedding_backward", indices_arg, kLong); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (num_indices <= 768 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE)); dim3 block(WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND_HALF (grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t>) , dim3(grid), dim3(block), sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY, stream, indices_contig.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); }); THCudaCheck(hipGetLastError()); return grad_weight; } auto sorted_indices = at::empty_like(indices); auto orig_indices = at::empty_like(indices); using device_ptr = 
thrust::device_ptr<int64_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } Tensor count; if (scale_grad_by_freq) { count = at::empty_like(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); auto count_data = device_ptr(count.data<int64_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>() ); } dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "embedding_backward", [&] { hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream, sorted_indices.data<int64_t>(), orig_indices.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), count.defined() ? count.data<int64_t>() : nullptr, num_indices, stride, padding_idx); }); THCudaCheck(hipGetLastError()); return grad_weight; } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; auto num_indices = indices.numel(); auto indices_contig = indices.contiguous(); auto indices_data = device_ptr(indices_contig.data<int64_t>()); // FIXME: thrust::unique only removes consecutive elements that are equal. 
// We have race conditions when indices contain duplicates which are not // adjacent auto unique_indices = at::empty(indices.numel(), indices.options()); auto unique_data = device_ptr(unique_indices.data<int64_t>()); auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); auto num_unique_indices = static_cast<int>(end - unique_data); dim3 grid(num_unique_indices); dim3 block(128); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream, self.data<scalar_t>(), unique_indices.data<int64_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); }); THCudaCheck(hipGetLastError()); return self; } }} // namespace at::native
adbf5243bf8c2e5180cb4f01f078c7972ed6079f.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int WARP_SIZE = 64; static const int BLOCKDIMY = 16; #else static const int WARP_SIZE = 32; static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t> __global__ void embedding_backward_feature_kernel (int64_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t> __global__ void embedding_backward_kernel( int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? 
(accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t> __global__ void renorm_kernel( scalar_t* weights, int64_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += std::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarType("embedding_backward", indices_arg, kLong); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (num_indices <= 768 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE)); dim3 block(WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND_HALF (grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; embedding_backward_feature_kernel<scalar_t, accscalar_t> <<<grid, block, sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY, stream>>> (indices_contig.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); }); THCudaCheck(cudaGetLastError()); return grad_weight; } auto sorted_indices = at::empty_like(indices); auto orig_indices = at::empty_like(indices); using device_ptr = thrust::device_ptr<int64_t>; // Sort the inputs 
into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } Tensor count; if (scale_grad_by_freq) { count = at::empty_like(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); auto count_data = device_ptr(count.data<int64_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>() ); } dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "embedding_backward", [&] { embedding_backward_kernel<<<grid, block, 0, stream>>>( sorted_indices.data<int64_t>(), orig_indices.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), count.defined() ? count.data<int64_t>() : nullptr, num_indices, stride, padding_idx); }); THCudaCheck(cudaGetLastError()); return grad_weight; } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; auto num_indices = indices.numel(); auto indices_contig = indices.contiguous(); auto indices_data = device_ptr(indices_contig.data<int64_t>()); // FIXME: thrust::unique only removes consecutive elements that are equal. 
// We have race conditions when indices contain duplicates which are not // adjacent auto unique_indices = at::empty(indices.numel(), indices.options()); auto unique_data = device_ptr(unique_indices.data<int64_t>()); auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); auto num_unique_indices = static_cast<int>(end - unique_data); dim3 grid(num_unique_indices); dim3 block(128); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>( self.data<scalar_t>(), unique_indices.data<int64_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); }); THCudaCheck(cudaGetLastError()); return self; } }} // namespace at::native
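The forward and reverse inclusive_scan_by_key calls above are the easiest part of this file to misread, so here is a stand-alone sketch of just that counting trick. It is my own illustration, not part of the ATen sources; the sample index list is the one from the comments above, and it can be built on its own with nvcc.

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <cstdint>
#include <cstdio>

int main() {
  // sorted indices: 2 5 5 5 7 7 8 9 9
  const int64_t h_sorted[] = {2, 5, 5, 5, 7, 7, 8, 9, 9};
  thrust::device_vector<int64_t> sorted(h_sorted, h_sorted + 9);
  thrust::device_vector<int64_t> count(sorted.size());

  // pass 1: running count inside each run of equal keys -> 1 1 2 3 1 2 1 1 2
  thrust::inclusive_scan_by_key(
      sorted.begin(), sorted.end(),
      thrust::make_constant_iterator<int64_t>(1),
      count.begin());

  // pass 2: sweep backwards with maximum so every element of a run
  // sees the run length -> 1 3 3 3 2 2 1 2 2
  thrust::inclusive_scan_by_key(
      thrust::make_reverse_iterator(sorted.end()),
      thrust::make_reverse_iterator(sorted.begin()),
      thrust::make_reverse_iterator(count.end()),
      thrust::make_reverse_iterator(count.end()),
      thrust::equal_to<int64_t>(),
      thrust::maximum<int64_t>());

  thrust::host_vector<int64_t> h_count = count;
  for (size_t i = 0; i < h_count.size(); ++i)
    std::printf("%lld ", (long long)h_count[i]);
  std::printf("\n");
  return 0;
}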
6fee5be5fe091dc0ed3be127f44cee5f69e8c443.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdio> #include <cstdlib> #include <omp.h> #include <sys/time.h> #include <omp.h> #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <ctype.h> #include <cstring> #include "tbb/parallel_sort.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> using namespace std; const double pi=3.141592653589793238462643383279502884197; int ref_line[20]; char ref_file[20][16]; int sam_line[20]; char sam_file[20][16]; const int GPU_N = 2; const int GBSize = 1024 * 1024 * 1024; const int block_size = 512; const int TILE_SIZE = 1024; struct NODE { double ra,dec; int pix; }; const int cntSize = 805306368; double diffTime(timeval start,timeval end) { return (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001; } bool cmp(NODE a,NODE b) { return a.pix < b.pix; } void readFile(char *file,int N, NODE nn[]) { FILE *fd = fopen(file,"r"); if(fd == NULL) printf("Read %s error!\n",file); for(int i = 0; i < N; ++i) fscanf(fd,"%d%lf%lf",&nn[i].pix,&nn[i].ra,&nn[i].dec); fclose(fd); } __host__ __device__ int begin_index(int key, NODE *node, int N) { for(int i = 0; i < N; ++i) if(node[i].pix > key) return i; return N; } __host__ __device__ int get_start(int key,NODE *node,int N) { for(int i = 0; i < N; ++i) { if(node[i].pix >= key) return i; } return N; } __host__ __device__ int get_end(int key,NODE *node,int N) { for(int i = 0; i < N; ++i) if(node[i].pix > key) return i; return N; } __host__ __device__ int binary_search(int key, NODE *node, int N) { int st = 0; int ed = N - 1; while(st < ed) { int mid = st + ((ed - st) >> 1); if(node[mid].pix <= key) st = mid + 1; else ed = mid; } if(node[ed].pix > key) return ed; return -1; } __host__ __device__ double radians(double degree) { return degree * pi / 180.0; } __host__ __device__ bool matched(double ra1,double dec1,double ra2,double dec2,double radius) { double z1 = sin(radians(dec1)); double x1 = cos(radians(dec1)) * cos(radians(ra1)); double y1 = cos(radians(dec1)) * sin(radians(ra1)); double z2 = sin(radians(dec2)); double x2 = cos(radians(dec2)) * cos(radians(ra2)); double y2 = cos(radians(dec2)) * sin(radians(ra2)); double distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2); double dist2 = 4 * pow(sin(radians(0.0056 / 2)),2); if(distance <= dist2) return true; return false; } __global__ void kernel_singleCM(NODE *ref_node, int ref_N, NODE *sam_node, int sam_N, int *sam_match,int *sam_matchedCnt,int ref_offset,int sam_offset) { __shared__ int s_ref_pix[TILE_SIZE]; __shared__ double s_ref_ra[TILE_SIZE]; __shared__ double s_ref_dec[TILE_SIZE]; __shared__ int start_pix,end_pix; __shared__ int start_ref_pos,end_ref_pos; __shared__ int block_sam_N,block_ref_N; __shared__ int iteration; int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < sam_N) sam_matchedCnt[tid] = 0; if(threadIdx.x == 0) { if(blockIdx.x == gridDim.x - 1) // the last block block_sam_N = sam_N - blockIdx.x * blockDim.x; else block_sam_N = blockDim.x; start_pix = sam_node[tid].pix; end_pix = sam_node[tid + block_sam_N - 1].pix; if(start_pix == 0) start_ref_pos = 0; else start_ref_pos = binary_search(start_pix - 1,ref_node,ref_N); end_ref_pos = binary_search(end_pix,ref_node,ref_N); if(end_ref_pos == -1) end_ref_pos = ref_N - 1; else end_ref_pos--; block_ref_N = end_ref_pos - start_ref_pos + 1; iteration = ceil(block_ref_N * 1.0 / TILE_SIZE); } __syncthreads(); if(start_ref_pos == -1 
|| end_ref_pos < start_ref_pos) return; int pix,cnt = 0; double sam_ra,sam_dec; if(tid < sam_N) { pix = sam_node[tid].pix; sam_ra = sam_node[tid].ra; sam_dec = sam_node[tid].dec; cnt = 0; } __syncthreads(); for(int ite = 0; ite < iteration; ++ite) { __syncthreads(); for(int k = 0; k < TILE_SIZE / blockDim.x; ++k) { int ref_pos = start_ref_pos + ite * TILE_SIZE + blockDim.x * k + threadIdx.x; int s_ref_pos = blockDim.x * k + threadIdx.x; if(ref_pos <= end_ref_pos) { s_ref_pix[s_ref_pos] = ref_node[ref_pos].pix; s_ref_ra[s_ref_pos] = ref_node[ref_pos].ra; s_ref_dec[s_ref_pos] = ref_node[ref_pos].dec; } else s_ref_pix[s_ref_pos] = -1; } __syncthreads(); if(tid >= sam_N) continue; for(int j = 0; j < TILE_SIZE; ++j) { if(s_ref_pix[j] == -1 || s_ref_pix[j] > pix) break; if(s_ref_pix[j] < pix) continue; if(s_ref_pix[j] == pix && matched(sam_ra,sam_dec,s_ref_ra[j],s_ref_dec[j],0.0056)) { cnt++; if(cnt <= 5) sam_match[tid * 5 + cnt - 1] = ref_offset + start_ref_pos + ite * TILE_SIZE + j; } } } if(tid < sam_N) sam_matchedCnt[tid] = cnt; } void singleCM(NODE h_ref_node[], int ref_N, NODE h_sam_node[], int sam_N, int h_sam_match[],int h_sam_matchedCnt[]) { //the maximum number of sample points that can be matched each time by each card int part_sam_N = 25000000; int part_ref_N = 8 * part_sam_N; NODE *d_ref_node[GPU_N]; NODE *d_sam_node[GPU_N]; int *d_sam_match[GPU_N], *d_sam_matchedCnt[GPU_N]; omp_set_num_threads(GPU_N); #pragma omp parallel { int i = omp_get_thread_num() % GPU_N; checkCudaErrors(hipSetDevice(i)); checkCudaErrors(hipDeviceReset()); size_t free_mem,total_mem; checkCudaErrors(hipMemGetInfo(&free_mem,&total_mem)); printf("Card %d before malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize); checkCudaErrors(hipMalloc(&d_ref_node[i],sizeof(NODE) * part_ref_N)); checkCudaErrors(hipMalloc(&d_sam_node[i],sizeof(NODE) * part_sam_N)); checkCudaErrors(hipMalloc(&d_sam_match[i],sizeof(int) * part_sam_N * 5)); checkCudaErrors(hipMalloc(&d_sam_matchedCnt[i],sizeof(int) * part_sam_N)); checkCudaErrors(hipMemGetInfo(&free_mem,&total_mem)); printf("Card %d after malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize); //the total number of sample points processed by this card int card_sam_N; if(i == GPU_N - 1) card_sam_N = sam_N - i * sam_N / GPU_N; else card_sam_N = sam_N / GPU_N; int iteration = ceil(card_sam_N * 1.0 / part_sam_N); for(int ite = 0; ite < iteration; ++ite) { int cur_sam_N; if(ite == iteration - 1) // the last round cur_sam_N = card_sam_N - ite * part_sam_N; else cur_sam_N = part_sam_N; int start_sam_pos = ite * part_sam_N + i * sam_N / GPU_N; int end_sam_pos = start_sam_pos + cur_sam_N - 1; int start_pix = h_sam_node[start_sam_pos].pix; int end_pix = h_sam_node[end_sam_pos].pix; int start_ref_pos; if(start_pix == 0) start_ref_pos = 0; else start_ref_pos = binary_search(start_pix - 1,h_ref_node,ref_N); // start_ref_pos = get_start(start_pix,h_ref_node,ref_N); if(start_ref_pos == -1) continue; int end_ref_pos = binary_search(end_pix,h_ref_node,ref_N) - 1; if(end_ref_pos == -2) end_ref_pos = ref_N - 1; int cur_ref_N = end_ref_pos - start_ref_pos + 1; dim3 block(block_size); dim3 grid(min(65536,(int)ceil(cur_sam_N * 1.0 / block.x))); if(cur_ref_N == 0) continue; printf("\n\nCard %d iteration %d\n",i,ite); printf("block.x %d grid.x %d\n",block.x,grid.x); printf("start_sam_pos %d start_sam_pix %d end_sam_pos %d end_sam_pix %d sam_N %d\n",start_sam_pos,start_pix,end_sam_pos,end_pix,cur_sam_N); 
printf("start_ref_pos %d start_ref_pix %d end_ref_pos %d end_ref_pix %d ref_N %d\n",start_ref_pos,h_ref_node[start_ref_pos].pix,end_ref_pos,h_ref_node[end_ref_pos].pix,cur_ref_N); checkCudaErrors(hipMemcpy(d_sam_node[i],h_sam_node + start_sam_pos,cur_sam_N * sizeof(NODE),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_ref_node[i],h_ref_node + start_ref_pos,cur_ref_N * sizeof(NODE), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel_singleCM), dim3(grid),dim3(block), 0, 0, d_ref_node[i],cur_ref_N,d_sam_node[i],cur_sam_N,d_sam_match[i],d_sam_matchedCnt[i],start_ref_pos,start_sam_pos); checkCudaErrors(hipMemcpy(h_sam_matchedCnt + start_sam_pos,d_sam_matchedCnt[i],cur_sam_N * sizeof(int),hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_sam_match + start_sam_pos * 5,d_sam_match[i],cur_sam_N * 5 * sizeof(int),hipMemcpyDeviceToHost)); } } unsigned long long sum = 0; int cnt[1000]; memset(cnt,0,sizeof(cnt)); for(int i = sam_N - 1; i >= 0; --i) { sum += h_sam_matchedCnt[i]; /* cout << i << " " << h_sam_matchedCnt[i] << endl; cout << h_sam_node[i].ra << " " << h_sam_node[i].dec << endl; cout << "\n----------------\n" << endl; for(int j = i * 5; j < i * 5 + min(5,h_sam_matchedCnt[i]); ++j) { int pos = h_sam_match[j]; cout << h_ref_node[pos].ra << " " << h_ref_node[pos].dec << endl; } cout << "\n--------------------\n" << endl; */ } cout << "sum " << sum << endl; cout << "ave " << sum * 1.0 / sam_N << endl; } int main(int argc, char *argv[]) { struct timeval start,end; int ref_N = atoi(argv[3]); int sam_N = atoi(argv[4]); // const int ref_N = 200006373; time_t rawtime; FILE *fd = fopen(argv[1],"r"); for(int i = 0; i < 20; ++i) fscanf(fd,"%d%s",&ref_line[i],ref_file[i]); fclose(fd); fd = fopen(argv[2],"r"); for(int i = 0; i < 20; ++i) fscanf(fd,"%d%s",&sam_line[i],sam_file[i]); fclose(fd); NODE *ref_node,*sam_node; int *sam_matchedCnt; int *sam_match; ref_node = (NODE *)malloc(sizeof(NODE) * ref_N); sam_node = (NODE *)malloc(sizeof(NODE) * sam_N); sam_matchedCnt = (int *)malloc(sizeof(int) * sam_N); sam_match = (int *)malloc(sizeof(int) * sam_N * 5); time(&rawtime); printf("before read ref file : %s\n",ctime(&rawtime)); omp_set_num_threads(20); #pragma omp parallel { int i = omp_get_thread_num() % 20; int offset = i * ref_line[0]; readFile(ref_file[i],ref_line[i],ref_node + offset); } time(&rawtime); printf("after read ref file : %s\n",ctime(&rawtime)); #pragma omp parallel { int i = omp_get_thread_num() % 20; int offset = i * sam_line[0]; readFile(sam_file[i],sam_line[i],sam_node + offset); } time(&rawtime); printf("after read sam file : %s\n",ctime(&rawtime)); gettimeofday(&start,NULL); tbb::parallel_sort(ref_node,ref_node + ref_N,cmp); tbb::parallel_sort(sam_node,sam_node + sam_N,cmp); gettimeofday(&end,NULL); printf("sort time %.3f \n",diffTime(start,end) * 0.001); time(&rawtime); printf("after sort : %s\n",ctime(&rawtime)); gettimeofday(&start,NULL); singleCM(ref_node,ref_N,sam_node,sam_N,sam_match,sam_matchedCnt); gettimeofday(&end,NULL); printf("single CM %.3f s\n",diffTime(start,end) * 0.001); printf("single CM %.3f min\n",diffTime(start,end) * 0.001 / 60); time(&rawtime); printf("singleCM : %s\n",ctime(&rawtime)); int *ref_match = (int*)malloc(sizeof(int) * ref_N * 5); int *ref_matchedCnt = (int*)malloc(sizeof(int) * ref_N); gettimeofday(&start,NULL); singleCM(sam_node,sam_N,ref_node,ref_N,ref_match,ref_matchedCnt); gettimeofday(&end,NULL); printf("singe CM-2 %.3f s\n",diffTime(start,end) * 0.001); free(sam_node); free(ref_node); free(ref_match); free(sam_match); 
free(ref_matchedCnt); free(sam_matchedCnt); return 0; }
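A detail worth calling out in matched() above: the test never computes an angle. It compares the squared 3-D chord length between the two unit vectors against 4*sin^2(radius/2), which is the squared chord corresponding to an angular separation of radius. The host-only sketch below is my own check of that equivalence; the 0.0056-degree radius is taken from the file above, while the test positions are made up.

#include <cmath>
#include <cstdio>

static const double pi = 3.141592653589793238462643383279502884197;

static double radians(double degree) { return degree * pi / 180.0; }

// squared chord distance between two (ra, dec) positions given in degrees
static double chord2(double ra1, double dec1, double ra2, double dec2) {
  double x1 = cos(radians(dec1)) * cos(radians(ra1));
  double y1 = cos(radians(dec1)) * sin(radians(ra1));
  double z1 = sin(radians(dec1));
  double x2 = cos(radians(dec2)) * cos(radians(ra2));
  double y2 = cos(radians(dec2)) * sin(radians(ra2));
  double z2 = sin(radians(dec2));
  return (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
}

int main() {
  const double radius_deg = 0.0056;  // same search radius as in matched()
  const double threshold = 4.0 * pow(sin(radians(radius_deg / 2)), 2);
  // a separation of 0.004 deg lies inside the radius, 0.010 deg lies outside
  printf("0.004 deg apart -> matched: %d\n", chord2(10.0, 20.0, 10.0, 20.004) <= threshold);
  printf("0.010 deg apart -> matched: %d\n", chord2(10.0, 20.0, 10.0, 20.010) <= threshold);
  return 0;
}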
6fee5be5fe091dc0ed3be127f44cee5f69e8c443.cu
#include <iostream> #include <cstdio> #include <cstdlib> #include <omp.h> #include <sys/time.h> #include <omp.h> #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <ctype.h> #include <cstring> #include "tbb/parallel_sort.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> using namespace std; const double pi=3.141592653589793238462643383279502884197; int ref_line[20]; char ref_file[20][16]; int sam_line[20]; char sam_file[20][16]; const int GPU_N = 2; const int GBSize = 1024 * 1024 * 1024; const int block_size = 512; const int TILE_SIZE = 1024; struct NODE { double ra,dec; int pix; }; const int cntSize = 805306368; double diffTime(timeval start,timeval end) { return (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001; } bool cmp(NODE a,NODE b) { return a.pix < b.pix; } void readFile(char *file,int N, NODE nn[]) { FILE *fd = fopen(file,"r"); if(fd == NULL) printf("Read %s error!\n",file); for(int i = 0; i < N; ++i) fscanf(fd,"%d%lf%lf",&nn[i].pix,&nn[i].ra,&nn[i].dec); fclose(fd); } __host__ __device__ int begin_index(int key, NODE *node, int N) { for(int i = 0; i < N; ++i) if(node[i].pix > key) return i; return N; } __host__ __device__ int get_start(int key,NODE *node,int N) { for(int i = 0; i < N; ++i) { if(node[i].pix >= key) return i; } return N; } __host__ __device__ int get_end(int key,NODE *node,int N) { for(int i = 0; i < N; ++i) if(node[i].pix > key) return i; return N; } __host__ __device__ int binary_search(int key, NODE *node, int N) { int st = 0; int ed = N - 1; while(st < ed) { int mid = st + ((ed - st) >> 1); if(node[mid].pix <= key) st = mid + 1; else ed = mid; } if(node[ed].pix > key) return ed; return -1; } __host__ __device__ double radians(double degree) { return degree * pi / 180.0; } __host__ __device__ bool matched(double ra1,double dec1,double ra2,double dec2,double radius) { double z1 = sin(radians(dec1)); double x1 = cos(radians(dec1)) * cos(radians(ra1)); double y1 = cos(radians(dec1)) * sin(radians(ra1)); double z2 = sin(radians(dec2)); double x2 = cos(radians(dec2)) * cos(radians(ra2)); double y2 = cos(radians(dec2)) * sin(radians(ra2)); double distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2); double dist2 = 4 * pow(sin(radians(0.0056 / 2)),2); if(distance <= dist2) return true; return false; } __global__ void kernel_singleCM(NODE *ref_node, int ref_N, NODE *sam_node, int sam_N, int *sam_match,int *sam_matchedCnt,int ref_offset,int sam_offset) { __shared__ int s_ref_pix[TILE_SIZE]; __shared__ double s_ref_ra[TILE_SIZE]; __shared__ double s_ref_dec[TILE_SIZE]; __shared__ int start_pix,end_pix; __shared__ int start_ref_pos,end_ref_pos; __shared__ int block_sam_N,block_ref_N; __shared__ int iteration; int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < sam_N) sam_matchedCnt[tid] = 0; if(threadIdx.x == 0) { if(blockIdx.x == gridDim.x - 1) // the last block block_sam_N = sam_N - blockIdx.x * blockDim.x; else block_sam_N = blockDim.x; start_pix = sam_node[tid].pix; end_pix = sam_node[tid + block_sam_N - 1].pix; if(start_pix == 0) start_ref_pos = 0; else start_ref_pos = binary_search(start_pix - 1,ref_node,ref_N); end_ref_pos = binary_search(end_pix,ref_node,ref_N); if(end_ref_pos == -1) end_ref_pos = ref_N - 1; else end_ref_pos--; block_ref_N = end_ref_pos - start_ref_pos + 1; iteration = ceil(block_ref_N * 1.0 / TILE_SIZE); } __syncthreads(); if(start_ref_pos == -1 || end_ref_pos < start_ref_pos) return; int pix,cnt = 0; double 
sam_ra,sam_dec; if(tid < sam_N) { pix = sam_node[tid].pix; sam_ra = sam_node[tid].ra; sam_dec = sam_node[tid].dec; cnt = 0; } __syncthreads(); for(int ite = 0; ite < iteration; ++ite) { __syncthreads(); for(int k = 0; k < TILE_SIZE / blockDim.x; ++k) { int ref_pos = start_ref_pos + ite * TILE_SIZE + blockDim.x * k + threadIdx.x; int s_ref_pos = blockDim.x * k + threadIdx.x; if(ref_pos <= end_ref_pos) { s_ref_pix[s_ref_pos] = ref_node[ref_pos].pix; s_ref_ra[s_ref_pos] = ref_node[ref_pos].ra; s_ref_dec[s_ref_pos] = ref_node[ref_pos].dec; } else s_ref_pix[s_ref_pos] = -1; } __syncthreads(); if(tid >= sam_N) continue; for(int j = 0; j < TILE_SIZE; ++j) { if(s_ref_pix[j] == -1 || s_ref_pix[j] > pix) break; if(s_ref_pix[j] < pix) continue; if(s_ref_pix[j] == pix && matched(sam_ra,sam_dec,s_ref_ra[j],s_ref_dec[j],0.0056)) { cnt++; if(cnt <= 5) sam_match[tid * 5 + cnt - 1] = ref_offset + start_ref_pos + ite * TILE_SIZE + j; } } } if(tid < sam_N) sam_matchedCnt[tid] = cnt; } void singleCM(NODE h_ref_node[], int ref_N, NODE h_sam_node[], int sam_N, int h_sam_match[],int h_sam_matchedCnt[]) { //the maximum number of sample points that can be matched each time by each card int part_sam_N = 25000000; int part_ref_N = 8 * part_sam_N; NODE *d_ref_node[GPU_N]; NODE *d_sam_node[GPU_N]; int *d_sam_match[GPU_N], *d_sam_matchedCnt[GPU_N]; omp_set_num_threads(GPU_N); #pragma omp parallel { int i = omp_get_thread_num() % GPU_N; checkCudaErrors(cudaSetDevice(i)); checkCudaErrors(cudaDeviceReset()); size_t free_mem,total_mem; checkCudaErrors(cudaMemGetInfo(&free_mem,&total_mem)); printf("Card %d before malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize); checkCudaErrors(cudaMalloc(&d_ref_node[i],sizeof(NODE) * part_ref_N)); checkCudaErrors(cudaMalloc(&d_sam_node[i],sizeof(NODE) * part_sam_N)); checkCudaErrors(cudaMalloc(&d_sam_match[i],sizeof(int) * part_sam_N * 5)); checkCudaErrors(cudaMalloc(&d_sam_matchedCnt[i],sizeof(int) * part_sam_N)); checkCudaErrors(cudaMemGetInfo(&free_mem,&total_mem)); printf("Card %d after malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize); //the total number of sample points processed by this card int card_sam_N; if(i == GPU_N - 1) card_sam_N = sam_N - i * sam_N / GPU_N; else card_sam_N = sam_N / GPU_N; int iteration = ceil(card_sam_N * 1.0 / part_sam_N); for(int ite = 0; ite < iteration; ++ite) { int cur_sam_N; if(ite == iteration - 1) // the last round cur_sam_N = card_sam_N - ite * part_sam_N; else cur_sam_N = part_sam_N; int start_sam_pos = ite * part_sam_N + i * sam_N / GPU_N; int end_sam_pos = start_sam_pos + cur_sam_N - 1; int start_pix = h_sam_node[start_sam_pos].pix; int end_pix = h_sam_node[end_sam_pos].pix; int start_ref_pos; if(start_pix == 0) start_ref_pos = 0; else start_ref_pos = binary_search(start_pix - 1,h_ref_node,ref_N); // start_ref_pos = get_start(start_pix,h_ref_node,ref_N); if(start_ref_pos == -1) continue; int end_ref_pos = binary_search(end_pix,h_ref_node,ref_N) - 1; if(end_ref_pos == -2) end_ref_pos = ref_N - 1; int cur_ref_N = end_ref_pos - start_ref_pos + 1; dim3 block(block_size); dim3 grid(min(65536,(int)ceil(cur_sam_N * 1.0 / block.x))); if(cur_ref_N == 0) continue; printf("\n\nCard %d iteration %d\n",i,ite); printf("block.x %d grid.x %d\n",block.x,grid.x); printf("start_sam_pos %d start_sam_pix %d end_sam_pos %d end_sam_pix %d sam_N %d\n",start_sam_pos,start_pix,end_sam_pos,end_pix,cur_sam_N); printf("start_ref_pos %d start_ref_pix %d end_ref_pos %d end_ref_pix %d 
ref_N %d\n",start_ref_pos,h_ref_node[start_ref_pos].pix,end_ref_pos,h_ref_node[end_ref_pos].pix,cur_ref_N); checkCudaErrors(cudaMemcpy(d_sam_node[i],h_sam_node + start_sam_pos,cur_sam_N * sizeof(NODE),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_ref_node[i],h_ref_node + start_ref_pos,cur_ref_N * sizeof(NODE), cudaMemcpyHostToDevice)); kernel_singleCM<<<grid,block>>>(d_ref_node[i],cur_ref_N,d_sam_node[i],cur_sam_N,d_sam_match[i],d_sam_matchedCnt[i],start_ref_pos,start_sam_pos); checkCudaErrors(cudaMemcpy(h_sam_matchedCnt + start_sam_pos,d_sam_matchedCnt[i],cur_sam_N * sizeof(int),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_sam_match + start_sam_pos * 5,d_sam_match[i],cur_sam_N * 5 * sizeof(int),cudaMemcpyDeviceToHost)); } } unsigned long long sum = 0; int cnt[1000]; memset(cnt,0,sizeof(cnt)); for(int i = sam_N - 1; i >= 0; --i) { sum += h_sam_matchedCnt[i]; /* cout << i << " " << h_sam_matchedCnt[i] << endl; cout << h_sam_node[i].ra << " " << h_sam_node[i].dec << endl; cout << "\n----------------\n" << endl; for(int j = i * 5; j < i * 5 + min(5,h_sam_matchedCnt[i]); ++j) { int pos = h_sam_match[j]; cout << h_ref_node[pos].ra << " " << h_ref_node[pos].dec << endl; } cout << "\n--------------------\n" << endl; */ } cout << "sum " << sum << endl; cout << "ave " << sum * 1.0 / sam_N << endl; } int main(int argc, char *argv[]) { struct timeval start,end; int ref_N = atoi(argv[3]); int sam_N = atoi(argv[4]); // const int ref_N = 200006373; time_t rawtime; FILE *fd = fopen(argv[1],"r"); for(int i = 0; i < 20; ++i) fscanf(fd,"%d%s",&ref_line[i],ref_file[i]); fclose(fd); fd = fopen(argv[2],"r"); for(int i = 0; i < 20; ++i) fscanf(fd,"%d%s",&sam_line[i],sam_file[i]); fclose(fd); NODE *ref_node,*sam_node; int *sam_matchedCnt; int *sam_match; ref_node = (NODE *)malloc(sizeof(NODE) * ref_N); sam_node = (NODE *)malloc(sizeof(NODE) * sam_N); sam_matchedCnt = (int *)malloc(sizeof(int) * sam_N); sam_match = (int *)malloc(sizeof(int) * sam_N * 5); time(&rawtime); printf("before read ref file : %s\n",ctime(&rawtime)); omp_set_num_threads(20); #pragma omp parallel { int i = omp_get_thread_num() % 20; int offset = i * ref_line[0]; readFile(ref_file[i],ref_line[i],ref_node + offset); } time(&rawtime); printf("after read ref file : %s\n",ctime(&rawtime)); #pragma omp parallel { int i = omp_get_thread_num() % 20; int offset = i * sam_line[0]; readFile(sam_file[i],sam_line[i],sam_node + offset); } time(&rawtime); printf("after read sam file : %s\n",ctime(&rawtime)); gettimeofday(&start,NULL); tbb::parallel_sort(ref_node,ref_node + ref_N,cmp); tbb::parallel_sort(sam_node,sam_node + sam_N,cmp); gettimeofday(&end,NULL); printf("sort time %.3f \n",diffTime(start,end) * 0.001); time(&rawtime); printf("after sort : %s\n",ctime(&rawtime)); gettimeofday(&start,NULL); singleCM(ref_node,ref_N,sam_node,sam_N,sam_match,sam_matchedCnt); gettimeofday(&end,NULL); printf("single CM %.3f s\n",diffTime(start,end) * 0.001); printf("single CM %.3f min\n",diffTime(start,end) * 0.001 / 60); time(&rawtime); printf("singleCM : %s\n",ctime(&rawtime)); int *ref_match = (int*)malloc(sizeof(int) * ref_N * 5); int *ref_matchedCnt = (int*)malloc(sizeof(int) * ref_N); gettimeofday(&start,NULL); singleCM(sam_node,sam_N,ref_node,ref_N,ref_match,ref_matchedCnt); gettimeofday(&end,NULL); printf("singe CM-2 %.3f s\n",diffTime(start,end) * 0.001); free(sam_node); free(ref_node); free(ref_match); free(sam_match); free(ref_matchedCnt); free(sam_matchedCnt); return 0; }
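Set side by side, the .hip and .cu versions of this pair differ almost entirely in launch syntax: hipify keeps the kernels and host logic and rewrites each triple-chevron launch into hipLaunchKernelGGL. The fragment below is my own minimal illustration of that mapping (the scale kernel and the sizes are invented), compilable with hipcc.

#include <hip/hip_runtime.h>

__global__ void scale(float *data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

int main() {
  const int n = 1 << 20;
  float *d_data;
  hipMalloc((void**)&d_data, n * sizeof(float));
  hipMemset(d_data, 0, n * sizeof(float));
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // CUDA form, as in the .cu file:  scale<<<grid, block, 0, 0>>>(d_data, 2.0f, n);
  // hipify rewrites it so that grid, block, dynamic shared-memory bytes and the
  // stream come first, followed by the kernel arguments:
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);

  hipDeviceSynchronize();
  hipFree(d_data);
  return 0;
}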
feb0eac4733ed31bd44f95442c51729ad36ef0ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Liao, 7/9/2014, add collapse() inside jacobi() #include <stdio.h> #include <math.h> #include <assert.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif // Add timing support #include <sys/time.h> #include "libxomp.h" #include "xomp_cuda_lib_inlined.cu" double time_stamp() { struct timeval t; double time; gettimeofday(&t,((struct timezone *)((void *)0))); time = t . tv_sec + 1.0e-6 * t . tv_usec; return time; } double time1; double time2; void driver(); void initialize(); void jacobi(); void error_check(); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve parallelism. * All do loops are parallelized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 512 int n; int m; int mits; #define REAL float // flexible between float and double // depending on MSIZE!! float error_ref = 9.212767E-04; float resid_ref = 2.355429E-08; float tol; float relax = 1.0; float alpha = 0.0543; float u[512][512]; float f[512][512]; float uold[512][512]; float dx; float dy; int main() { // float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n = 512; m = 512; tol = 0.0000000001; mits = 5000; #if 0 // Not yet support concurrent CPU and GPU threads #ifdef _OPENMP #endif #endif driver(); return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialzed. 
* * Working varaibles/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver() { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi(); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n",time2 - time1); /* error_check (n,m,alpha,dx,dy,u,f)*/ error_check(); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize() { int i; int j; int xx; int yy; //double PI=3.1415926; dx = (2.0 / (n - 1)); dy = (2.0 / (m - 1)); /* Initialize initial condition and RHS */ //#pragma omp parallel for private(xx,yy,j,i) for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = ((int )(- 1.0 + (dx * (i - 1)))); yy = ((int )(- 1.0 + (dy * (j - 1)))); u[i][j] = 0.0; f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy))); } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ __global__ void OUT__1__8714__(float omega,float ax,float ay,float b,int __final_total_iters__2__,int __i_interval__3__,float *_dev_per_block_error,float *_dev_u,float *_dev_f,float *_dev_uold) { int _p_i; int _p_j; float _p_error; _p_error = 0; float _p_resid; int _p___collapsed_index__5__; int _dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init(0,__final_total_iters__2__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__2__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p___collapsed_index__5__ = _dev_lower; _p___collapsed_index__5__ <= _dev_upper; _p___collapsed_index__5__ += 1) { _p_i = _p___collapsed_index__5__ / __i_interval__3__ * 1 + 1; _p_j = _p___collapsed_index__5__ % __i_interval__3__ * 1 + 1; _p_resid = (ax * (_dev_uold[(_p_i - 1) * 512 + _p_j] + _dev_uold[(_p_i + 1) * 512 + _p_j]) + ay * (_dev_uold[_p_i * 512 + (_p_j - 1)] + _dev_uold[_p_i * 512 + (_p_j + 1)]) + b * _dev_uold[_p_i * 512 + _p_j] - _dev_f[_p_i * 512 + _p_j]) / b; _dev_u[_p_i * 512 + _p_j] = _dev_uold[_p_i * 512 + _p_j] - omega * _p_resid; _p_error = _p_error + _p_resid * _p_resid; } xomp_inner_block_reduction_float(_p_error,_dev_per_block_error,6); } __global__ void OUT__2__8714__(float *_dev_u,float *_dev_uold,int __final_total_iters__8__,int __i_interval__9__) { int _p___collapsed_index__11__; int _p_i; int _p_j; int 
_dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init(0,__final_total_iters__8__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__8__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p___collapsed_index__11__ = _dev_lower; _p___collapsed_index__11__ <= _dev_upper; _p___collapsed_index__11__ += 1) { _p_i = _p___collapsed_index__11__ / __i_interval__9__ * 1 + 0; _p_j = _p___collapsed_index__11__ % __i_interval__9__ * 1 + 0; _dev_uold[_p_i * 512 + _p_j] = _dev_u[_p_i * 512 + _p_j]; } } void jacobi() { float omega; int i; int j; int k; float error; float resid; float ax; float ay; float b; // double error_local; // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2; // float te1,te2; // float second; omega = relax; /* * Initialize coefficients */ /* X-direction coef */ ax = (1.0 / (dx * dx)); /* Y-direction coef */ ay = (1.0 / (dy * dy)); /* Central coeff */ b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); error = (10.0 * tol); k = 1; /* Translated from #pragma omp target data ... */ { float *_dev_u; int _dev_u_size = sizeof(float ) * n * m; _dev_u = ((float *)(xomp_deviceMalloc(_dev_u_size))); xomp_memcpyHostToDevice(((void *)_dev_u),((const void *)u),_dev_u_size); float *_dev_f; int _dev_f_size = sizeof(float ) * n * m; _dev_f = ((float *)(xomp_deviceMalloc(_dev_f_size))); xomp_memcpyHostToDevice(((void *)_dev_f),((const void *)f),_dev_f_size); float *_dev_uold; int _dev_uold_size = sizeof(float ) * n * m; _dev_uold = ((float *)(xomp_deviceMalloc(_dev_uold_size))); while(k <= mits && error > tol){ int __i_total_iters__0__ = (n - 1 - 1 - 1 + 1) % 1 == 0?(n - 1 - 1 - 1 + 1) / 1 : (n - 1 - 1 - 1 + 1) / 1 + 1; int __j_total_iters__1__ = (m - 1 - 1 - 1 + 1) % 1 == 0?(m - 1 - 1 - 1 + 1) / 1 : (m - 1 - 1 - 1 + 1) / 1 + 1; int __final_total_iters__2__ = 1 * __i_total_iters__0__ * __j_total_iters__1__; int __i_interval__3__ = __j_total_iters__1__ * 1; int __j_interval__4__ = 1; int __collapsed_index__5__; int __i_total_iters__6__ = (n - 1 - 0 + 1) % 1 == 0?(n - 1 - 0 + 1) / 1 : (n - 1 - 0 + 1) / 1 + 1; int __j_total_iters__7__ = (m - 1 - 0 + 1) % 1 == 0?(m - 1 - 0 + 1) / 1 : (m - 1 - 0 + 1) / 1 + 1; int __final_total_iters__8__ = 1 * __i_total_iters__6__ * __j_total_iters__7__; int __i_interval__9__ = __j_total_iters__7__ * 1; int __j_interval__10__ = 1; int __collapsed_index__11__; error = 0.0; /* Copy new solution into old */ { /* Launch CUDA kernel ... */ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(); int _num_blocks_ = xomp_get_max1DBlock(__final_total_iters__8__ - 1 - 0 + 1); hipLaunchKernelGGL(( OUT__2__8714__), dim3(_num_blocks_),dim3(_threads_per_block_), 0, 0, _dev_u,_dev_uold,__final_total_iters__8__,__i_interval__9__); } { /* Launch CUDA kernel ... 
*/ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(); int _num_blocks_ = xomp_get_max1DBlock(__final_total_iters__2__ - 1 - 0 + 1); float *_dev_per_block_error = (float *)(xomp_deviceMalloc(_num_blocks_ * sizeof(float ))); hipLaunchKernelGGL(( OUT__1__8714__), dim3(_num_blocks_),dim3(_threads_per_block_),(_threads_per_block_ * sizeof(float )), 0, omega,ax,ay,b,__final_total_iters__2__,__i_interval__3__,_dev_per_block_error,_dev_u,_dev_f,_dev_uold); error = xomp_beyond_block_reduction_float(_dev_per_block_error,_num_blocks_,6); xomp_freeDevice(_dev_per_block_error); } // } /* omp end parallel */ /* Error check */ if (k % 500 == 0) { printf("Finished %d iteration with error =%f\n",k,error); } error = (sqrt(error) / (n * m)); k = k + 1; /* End iteration loop */ } xomp_memcpyDeviceToHost(((void *)u),((const void *)_dev_u),_dev_u_size); xomp_freeDevice(_dev_u); xomp_freeDevice(_dev_f); xomp_freeDevice(_dev_uold); } printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n",error); printf("Residual_ref :%E\n",resid_ref); printf("Diff ref=%E\n",(fabs((error - resid_ref)))); fabs((error - resid_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-resid_ref) < 1E-14","jacobi-ompacc-opt2.c",235,__PRETTY_FUNCTION__); } /* subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none ************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check() { int i; int j; float xx; float yy; float temp; float error; dx = (2.0 / (n - 1)); dy = (2.0 / (m - 1)); error = 0.0; //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = (- 1.0 + (dx * (i - 1))); yy = (- 1.0 + (dy * (j - 1))); temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy))); error = error + temp * temp; } error = (sqrt(error) / (n * m)); printf("Solution Error :%E \n",error); printf("Solution Error Ref :%E \n",error_ref); printf("Diff ref=%E\n",(fabs((error - error_ref)))); fabs((error - error_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-error_ref) < 1E-14","jacobi-ompacc-opt2.c",267,__PRETTY_FUNCTION__); }
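The generated kernel above ends by handing each thread's private _p_error to xomp_inner_block_reduction_float, and the host later combines the per-block values with xomp_beyond_block_reduction_float. I do not have the XOMP headers at hand, so the sketch below is a generic illustration of that two-level reduction idea rather than the actual helpers: each block folds its values in shared memory and writes a single partial sum, and the host adds the partials.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

__global__ void block_sum(const float *in, float *per_block, int n) {
  extern __shared__ float sdata[];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + tid;
  sdata[tid] = (i < n) ? in[i] : 0.0f;            // one value per thread, 0 past the end
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction in shared memory
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) per_block[blockIdx.x] = sdata[0]; // one partial sum per block
}

int main() {
  const int n = 1 << 20, threads = 256;
  const int blocks = (n + threads - 1) / threads;
  std::vector<float> h_in(n, 1.0f);               // all ones, so the expected total is n
  float *d_in, *d_partial;
  hipMalloc((void**)&d_in, n * sizeof(float));
  hipMalloc((void**)&d_partial, blocks * sizeof(float));
  hipMemcpy(d_in, h_in.data(), n * sizeof(float), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(block_sum, dim3(blocks), dim3(threads),
                     threads * sizeof(float), 0, d_in, d_partial, n);
  std::vector<float> h_partial(blocks);
  hipMemcpy(h_partial.data(), d_partial, blocks * sizeof(float), hipMemcpyDeviceToHost);
  double total = 0.0;                             // second level: combine on the host
  for (float v : h_partial) total += v;
  std::printf("total = %.0f (expected %d)\n", total, n);
  hipFree(d_in);
  hipFree(d_partial);
  return 0;
}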
feb0eac4733ed31bd44f95442c51729ad36ef0ee.cu
// Liao, 7/9/2014, add collapse() inside jacobi() #include <stdio.h> #include <math.h> #include <assert.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #endif // Add timing support #include <sys/time.h> #include "libxomp.h" #include "xomp_cuda_lib_inlined.cu" double time_stamp() { struct timeval t; double time; gettimeofday(&t,((struct timezone *)((void *)0))); time = t . tv_sec + 1.0e-6 * t . tv_usec; return time; } double time1; double time2; void driver(); void initialize(); void jacobi(); void error_check(); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve parallelism. * All do loops are parallelized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 512 int n; int m; int mits; #define REAL float // flexible between float and double // depending on MSIZE!! float error_ref = 9.212767E-04; float resid_ref = 2.355429E-08; float tol; float relax = 1.0; float alpha = 0.0543; float u[512][512]; float f[512][512]; float uold[512][512]; float dx; float dy; int main() { // float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n = 512; m = 512; tol = 0.0000000001; mits = 5000; #if 0 // Not yet support concurrent CPU and GPU threads #ifdef _OPENMP #endif #endif driver(); return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialzed. 
* * Working varaibles/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver() { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi(); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n",time2 - time1); /* error_check (n,m,alpha,dx,dy,u,f)*/ error_check(); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize() { int i; int j; int xx; int yy; //double PI=3.1415926; dx = (2.0 / (n - 1)); dy = (2.0 / (m - 1)); /* Initialize initial condition and RHS */ //#pragma omp parallel for private(xx,yy,j,i) for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = ((int )(- 1.0 + (dx * (i - 1)))); yy = ((int )(- 1.0 + (dy * (j - 1)))); u[i][j] = 0.0; f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy))); } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ __global__ void OUT__1__8714__(float omega,float ax,float ay,float b,int __final_total_iters__2__,int __i_interval__3__,float *_dev_per_block_error,float *_dev_u,float *_dev_f,float *_dev_uold) { int _p_i; int _p_j; float _p_error; _p_error = 0; float _p_resid; int _p___collapsed_index__5__; int _dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init(0,__final_total_iters__2__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__2__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p___collapsed_index__5__ = _dev_lower; _p___collapsed_index__5__ <= _dev_upper; _p___collapsed_index__5__ += 1) { _p_i = _p___collapsed_index__5__ / __i_interval__3__ * 1 + 1; _p_j = _p___collapsed_index__5__ % __i_interval__3__ * 1 + 1; _p_resid = (ax * (_dev_uold[(_p_i - 1) * 512 + _p_j] + _dev_uold[(_p_i + 1) * 512 + _p_j]) + ay * (_dev_uold[_p_i * 512 + (_p_j - 1)] + _dev_uold[_p_i * 512 + (_p_j + 1)]) + b * _dev_uold[_p_i * 512 + _p_j] - _dev_f[_p_i * 512 + _p_j]) / b; _dev_u[_p_i * 512 + _p_j] = _dev_uold[_p_i * 512 + _p_j] - omega * _p_resid; _p_error = _p_error + _p_resid * _p_resid; } xomp_inner_block_reduction_float(_p_error,_dev_per_block_error,6); } __global__ void OUT__2__8714__(float *_dev_u,float *_dev_uold,int __final_total_iters__8__,int __i_interval__9__) { int _p___collapsed_index__11__; int _p_i; int _p_j; int 
_dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init(0,__final_total_iters__8__ - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,__final_total_iters__8__ - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p___collapsed_index__11__ = _dev_lower; _p___collapsed_index__11__ <= _dev_upper; _p___collapsed_index__11__ += 1) { _p_i = _p___collapsed_index__11__ / __i_interval__9__ * 1 + 0; _p_j = _p___collapsed_index__11__ % __i_interval__9__ * 1 + 0; _dev_uold[_p_i * 512 + _p_j] = _dev_u[_p_i * 512 + _p_j]; } } void jacobi() { float omega; int i; int j; int k; float error; float resid; float ax; float ay; float b; // double error_local; // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2; // float te1,te2; // float second; omega = relax; /* * Initialize coefficients */ /* X-direction coef */ ax = (1.0 / (dx * dx)); /* Y-direction coef */ ay = (1.0 / (dy * dy)); /* Central coeff */ b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); error = (10.0 * tol); k = 1; /* Translated from #pragma omp target data ... */ { float *_dev_u; int _dev_u_size = sizeof(float ) * n * m; _dev_u = ((float *)(xomp_deviceMalloc(_dev_u_size))); xomp_memcpyHostToDevice(((void *)_dev_u),((const void *)u),_dev_u_size); float *_dev_f; int _dev_f_size = sizeof(float ) * n * m; _dev_f = ((float *)(xomp_deviceMalloc(_dev_f_size))); xomp_memcpyHostToDevice(((void *)_dev_f),((const void *)f),_dev_f_size); float *_dev_uold; int _dev_uold_size = sizeof(float ) * n * m; _dev_uold = ((float *)(xomp_deviceMalloc(_dev_uold_size))); while(k <= mits && error > tol){ int __i_total_iters__0__ = (n - 1 - 1 - 1 + 1) % 1 == 0?(n - 1 - 1 - 1 + 1) / 1 : (n - 1 - 1 - 1 + 1) / 1 + 1; int __j_total_iters__1__ = (m - 1 - 1 - 1 + 1) % 1 == 0?(m - 1 - 1 - 1 + 1) / 1 : (m - 1 - 1 - 1 + 1) / 1 + 1; int __final_total_iters__2__ = 1 * __i_total_iters__0__ * __j_total_iters__1__; int __i_interval__3__ = __j_total_iters__1__ * 1; int __j_interval__4__ = 1; int __collapsed_index__5__; int __i_total_iters__6__ = (n - 1 - 0 + 1) % 1 == 0?(n - 1 - 0 + 1) / 1 : (n - 1 - 0 + 1) / 1 + 1; int __j_total_iters__7__ = (m - 1 - 0 + 1) % 1 == 0?(m - 1 - 0 + 1) / 1 : (m - 1 - 0 + 1) / 1 + 1; int __final_total_iters__8__ = 1 * __i_total_iters__6__ * __j_total_iters__7__; int __i_interval__9__ = __j_total_iters__7__ * 1; int __j_interval__10__ = 1; int __collapsed_index__11__; error = 0.0; /* Copy new solution into old */ { /* Launch CUDA kernel ... */ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(); int _num_blocks_ = xomp_get_max1DBlock(__final_total_iters__8__ - 1 - 0 + 1); OUT__2__8714__<<<_num_blocks_,_threads_per_block_>>>(_dev_u,_dev_uold,__final_total_iters__8__,__i_interval__9__); } { /* Launch CUDA kernel ... 
*/ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(); int _num_blocks_ = xomp_get_max1DBlock(__final_total_iters__2__ - 1 - 0 + 1); float *_dev_per_block_error = (float *)(xomp_deviceMalloc(_num_blocks_ * sizeof(float ))); OUT__1__8714__<<<_num_blocks_,_threads_per_block_,(_threads_per_block_ * sizeof(float ))>>>(omega,ax,ay,b,__final_total_iters__2__,__i_interval__3__,_dev_per_block_error,_dev_u,_dev_f,_dev_uold); error = xomp_beyond_block_reduction_float(_dev_per_block_error,_num_blocks_,6); xomp_freeDevice(_dev_per_block_error); } // } /* omp end parallel */ /* Error check */ if (k % 500 == 0) { printf("Finished %d iteration with error =%f\n",k,error); } error = (sqrt(error) / (n * m)); k = k + 1; /* End iteration loop */ } xomp_memcpyDeviceToHost(((void *)u),((const void *)_dev_u),_dev_u_size); xomp_freeDevice(_dev_u); xomp_freeDevice(_dev_f); xomp_freeDevice(_dev_uold); } printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n",error); printf("Residual_ref :%E\n",resid_ref); printf("Diff ref=%E\n",(fabs((error - resid_ref)))); fabs((error - resid_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-resid_ref) < 1E-14","jacobi-ompacc-opt2.c",235,__PRETTY_FUNCTION__); } /* subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none ************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check() { int i; int j; float xx; float yy; float temp; float error; dx = (2.0 / (n - 1)); dy = (2.0 / (m - 1)); error = 0.0; //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = (- 1.0 + (dx * (i - 1))); yy = (- 1.0 + (dy * (j - 1))); temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy))); error = error + temp * temp; } error = (sqrt(error) / (n * m)); printf("Solution Error :%E \n",error); printf("Solution Error Ref :%E \n",error_ref); printf("Diff ref=%E\n",(fabs((error - error_ref)))); fabs((error - error_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-error_ref) < 1E-14","jacobi-ompacc-opt2.c",267,__PRETTY_FUNCTION__); }
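Both versions of the Jacobi code flatten the collapsed (i, j) loops into one linear index and then recover i and j with a division and a modulo, e.g. _p_i = idx / __i_interval__3__ + 1 and _p_j = idx % __i_interval__3__ + 1. The tiny host-only sketch below is my own restatement of that arithmetic on a made-up 8x10 grid, just to show which (i, j) pairs a flat index maps to.

#include <cstdio>

int main() {
  const int n = 8, m = 10;              // grid dimensions; the interior is 1..n-2 by 1..m-2
  const int i_iters = n - 2;            // number of i values (matches __i_total_iters__)
  const int j_iters = m - 2;            // number of j values (matches __j_total_iters__)
  const int total = i_iters * j_iters;  // matches __final_total_iters__
  const int interval = j_iters;         // matches __i_interval__: j values per i value
  for (int idx = 0; idx < total; ++idx) {
    int i = idx / interval + 1;         // same recovery as in OUT__1__8714__
    int j = idx % interval + 1;
    if (idx < 4 || idx > total - 3)     // print a few samples from both ends
      std::printf("idx %2d -> (i=%d, j=%d)\n", idx, i, j);
  }
  return 0;
}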
c044258022f5ee45521160ad57bc4bb6e81d893b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" //#include "caffe/vision_layers.hpp" #include "caffe/layers/l1_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1); } } template <typename Dtype> __global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0); } } template <typename Dtype> __global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? in[index] : Dtype(0); } } template <typename Dtype> __global__ void KillMasked(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0); // out[index] = out[index]==out[index] ? out[index] : Dtype(0); // out[index] = out[index]>1e3 ? 0 : out[index]; // out[index] = out[index]<-1e3 ? 0 : out[index]; } } template <typename Dtype> __global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int mask_idx = index % width_height; out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0); } } template <typename Dtype> __global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is } } template <typename Dtype> __global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { out[index] = (fabs(in[index]) < plateau) ? 
Dtype(0) : Dtype(1); } } template <typename Dtype> void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Blob<Dtype> *diffptr = diff_top_vec_[0]; Dtype dot, loss; if(bottom.size() > 1) { diff_layer_->Forward(bottom, diff_top_vec_); } // if necessary, compute the number of not-NaNs int count = bottom[0]->count(); int num = bottom[0]->num(); hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), mask_.mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) { caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_); normalize_coeff_ /= mask_.channels(); } else { normalize_coeff_ = num; } if (this->layer_param_.l1_loss_param().l2_per_location()) { // set masked (NaNs only) to zero hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mask_.gpu_data(), diffptr->mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; square_layer_->Forward(diff_top_vec_, square_top_vec_); sum_layer_->Forward(square_top_vec_, sum_top_vec_); // Mask plateau in summed blob (only one channel): if(this->layer_param_.l1_loss_param().plateau() > 0) { float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau(); hipLaunchKernelGGL(( MaskPlateauValuesInitial<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_); // Note sign_ is set to all ones in Reshape caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot); } else { // Mask plateau: if(this->layer_param_.l1_loss_param().plateau() > 0) { hipLaunchKernelGGL(( MaskPlateauValues<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau()); CUDA_POST_KERNEL_CHECK; } //mask_.print("MASK2"); // set masked (NaNs, plateau) to zero hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, mask_.gpu_data(), diffptr->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( ComputeSign<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), sign_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot); } loss = dot / normalize_coeff_; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { bool prop_down = propagate_down[0]; if(bottom.size() > 1) prop_down |= propagate_down[1]; Blob<Dtype> *diffptr = diff_top_vec_[0]; if (prop_down) { const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_; if (this->layer_param_.l1_loss_param().l2_per_location()) { vector<bool> prop_down(1,true); 
caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(), Dtype(0), sqrt_output_.mutable_gpu_diff()); sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_); if(this->layer_param_.l1_loss_param().plateau() > 0) { hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff()); hipDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_); square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_); } else { caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(), Dtype(0), diffptr->mutable_gpu_diff()); } hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if(bottom.size() > 1) { diff_layer_->Backward(diff_top_vec_, propagate_down, bottom); } } } INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer); } // namespace caffe
c044258022f5ee45521160ad57bc4bb6e81d893b.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" //#include "caffe/vision_layers.hpp" #include "caffe/layers/l1_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1); } } template <typename Dtype> __global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0); } } template <typename Dtype> __global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index]==in[index] ? in[index] : Dtype(0); } } template <typename Dtype> __global__ void KillMasked(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0); // out[index] = out[index]==out[index] ? out[index] : Dtype(0); // out[index] = out[index]>1e3 ? 0 : out[index]; // out[index] = out[index]<-1e3 ? 0 : out[index]; } } template <typename Dtype> __global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int mask_idx = index % width_height; out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0); } } template <typename Dtype> __global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is } } template <typename Dtype> __global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) { CUDA_KERNEL_LOOP(index, n) { out[index] = (fabs(in[index]) < plateau) ? 
Dtype(0) : Dtype(1); } } template <typename Dtype> void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Blob<Dtype> *diffptr = diff_top_vec_[0]; Dtype dot, loss; if(bottom.size() > 1) { diff_layer_->Forward(bottom, diff_top_vec_); } // if necessary, compute the number of not-NaNs int count = bottom[0]->count(); int num = bottom[0]->num(); FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diffptr->gpu_data(), mask_.mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) { caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_); normalize_coeff_ /= mask_.channels(); } else { normalize_coeff_ = num; } if (this->layer_param_.l1_loss_param().l2_per_location()) { // set masked (NaNs only) to zero KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, mask_.gpu_data(), diffptr->mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; square_layer_->Forward(diff_top_vec_, square_top_vec_); sum_layer_->Forward(square_top_vec_, sum_top_vec_); // Mask plateau in summed blob (only one channel): if(this->layer_param_.l1_loss_param().plateau() > 0) { float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau(); MaskPlateauValuesInitial<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>( sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>( sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_); // Note sign_ is set to all ones in Reshape caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot); } else { // Mask plateau: if(this->layer_param_.l1_loss_param().plateau() > 0) { MaskPlateauValues<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau()); CUDA_POST_KERNEL_CHECK; } //mask_.print("MASK2"); // set masked (NaNs, plateau) to zero KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, mask_.gpu_data(), diffptr->mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; ComputeSign<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diffptr->gpu_data(), sign_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot); } loss = dot / normalize_coeff_; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { bool prop_down = propagate_down[0]; if(bottom.size() > 1) prop_down |= propagate_down[1]; Blob<Dtype> *diffptr = diff_top_vec_[0]; if (prop_down) { const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_; if (this->layer_param_.l1_loss_param().l2_per_location()) { vector<bool> prop_down(1,true); caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(), Dtype(0), sqrt_output_.mutable_gpu_diff()); sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_); if(this->layer_param_.l1_loss_param().plateau() > 0) { 
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>( sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff()); cudaDeviceSynchronize(); CUDA_POST_KERNEL_CHECK; } sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_); square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_); } else { caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(), Dtype(0), diffptr->mutable_gpu_diff()); } KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>( diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if(bottom.size() > 1) { diff_layer_->Backward(diff_top_vec_, propagate_down, bottom); } } } INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer); } // namespace caffe
dcd9a4d8f4826b0a88d6792a327d046c947c2224.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

//#define GPU_TH 1024
//#define TILE 8
// NOTE: TILE (and GPU_TH) are assumed to be supplied by the build system; the local defines above are commented out.

__global__ void mul_gpu(int *A, int *B, int *C, int size)
{
    // A, B are the input matrices
    // C is the output matrix
    // size is the dimension of A
    __shared__ int As[TILE][TILE];
    __shared__ int Bs[TILE][TILE];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int x = by * TILE + ty;
    int y = bx * TILE + tx;

    // printf("Hello %dx%d\n",block, thread);

    int tmp = 0;
    for (int k = 0; k < size/TILE; k++) {
        // load one tile of A and one tile of B into shared memory
        As[ty][tx] = A[x*size + k*TILE + tx];
        Bs[ty][tx] = B[y + (k*TILE + ty)*size];
        __syncthreads();

        // accumulate the partial dot product for this tile
        for (int i = 0; i < TILE; i++) {
            tmp += As[ty][i] * Bs[i][tx];
        }
        // synchronize before the tiles are overwritten -- otherwise the result is wrong
        __syncthreads();
    }

    // output
    C[x*size + y] = tmp;
}

// trivial algorithm
void trivial(int size, int ** A, int ** B, int ** C)
{
    clock_t start, end;
    clock_t start_gpu, end_gpu;
    start = clock();

    int *cuda_A;
    int *cuda_B;
    int *cuda_C;

    // launch configuration
    //int gx;
    //int bx;
    /* if (size < 0) {
        gx = size;
        bx = size;
    } else {
        // ensure saturation
        bx = GPU_TH;
        gx = (((size*size)/GPU_TH) + 1)/2;
    }*/
    dim3 grid(size/TILE, size/TILE, 1);
    dim3 block(TILE, TILE, 1);

    // cout << "before allocation" << flush << endl;
    hipMalloc((void**)&cuda_A, sizeof(int)*size*size);
    hipMalloc((void**)&cuda_B, sizeof(int)*size*size);
    hipMalloc((void**)&cuda_C, sizeof(int)*size*size);

    // cout << "before copying" << flush << endl;
    for (int i = 0; i < size; i++) {
        // cout << "iteration: " << i << flush << endl;
        hipMemcpy(&cuda_A[i*size], A[i], sizeof(int)*size, hipMemcpyHostToDevice);
        hipMemcpy(&cuda_B[i*size], B[i], sizeof(int)*size, hipMemcpyHostToDevice);
    }
    // hipMemcpy(cuda_A, A, sizeof(int)*size*size, hipMemcpyHostToDevice);
    // hipMemcpy(cuda_B, B, sizeof(int)*size*size, hipMemcpyHostToDevice);

    // cout << "before launch" << flush << endl;
    start_gpu = clock();
    hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_A, cuda_B, cuda_C, size);
    end_gpu = clock();

    // cout << "before kernel synchronization" << flush << endl;
    hipDeviceSynchronize();

    // cout << "before fetching the result" << flush << endl;
    // hipMemcpy(C, cuda_C, sizeof(int)*size*size, hipMemcpyDeviceToHost);
    for (int i = 0; i < size; i++) {
        // cout << "iteration: " << i << flush << endl;
        hipMemcpy(C[i], &cuda_C[i*size], sizeof(int)*size, hipMemcpyDeviceToHost);
    }

    // cout << "before freeing memory" << flush << endl;
    hipFree(cuda_A);
    hipFree(cuda_B);
    hipFree(cuda_C);

    // cout << "before exit" << flush << endl;
    end = clock();
    cout << "Running for " << (double)(end-start)/CLOCKS_PER_SEC << endl << flush;
    cout << "GPU running for " << (double)(end_gpu-start_gpu)/CLOCKS_PER_SEC << endl << flush;
}
dcd9a4d8f4826b0a88d6792a327d046c947c2224.cu
//#define GPU_TH 1024
//#define TILE 8
// NOTE: TILE (and GPU_TH) are assumed to be supplied by the build system; the local defines above are commented out.

__global__ void mul_gpu(int *A, int *B, int *C, int size)
{
    // A, B are the input matrices
    // C is the output matrix
    // size is the dimension of A
    __shared__ int As[TILE][TILE];
    __shared__ int Bs[TILE][TILE];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int x = by * TILE + ty;
    int y = bx * TILE + tx;

    // printf("Hello %dx%d\n",block, thread);

    int tmp = 0;
    for (int k = 0; k < size/TILE; k++) {
        // load one tile of A and one tile of B into shared memory
        As[ty][tx] = A[x*size + k*TILE + tx];
        Bs[ty][tx] = B[y + (k*TILE + ty)*size];
        __syncthreads();

        // accumulate the partial dot product for this tile
        for (int i = 0; i < TILE; i++) {
            tmp += As[ty][i] * Bs[i][tx];
        }
        // synchronize before the tiles are overwritten -- otherwise the result is wrong
        __syncthreads();
    }

    // output
    C[x*size + y] = tmp;
}

// trivial algorithm
void trivial(int size, int ** A, int ** B, int ** C)
{
    clock_t start, end;
    clock_t start_gpu, end_gpu;
    start = clock();

    int *cuda_A;
    int *cuda_B;
    int *cuda_C;

    // launch configuration
    //int gx;
    //int bx;
    /* if (size < 0) {
        gx = size;
        bx = size;
    } else {
        // ensure saturation
        bx = GPU_TH;
        gx = (((size*size)/GPU_TH) + 1)/2;
    }*/
    dim3 grid(size/TILE, size/TILE, 1);
    dim3 block(TILE, TILE, 1);

    // cout << "before allocation" << flush << endl;
    cudaMalloc((void**)&cuda_A, sizeof(int)*size*size);
    cudaMalloc((void**)&cuda_B, sizeof(int)*size*size);
    cudaMalloc((void**)&cuda_C, sizeof(int)*size*size);

    // cout << "before copying" << flush << endl;
    for (int i = 0; i < size; i++) {
        // cout << "iteration: " << i << flush << endl;
        cudaMemcpy(&cuda_A[i*size], A[i], sizeof(int)*size, cudaMemcpyHostToDevice);
        cudaMemcpy(&cuda_B[i*size], B[i], sizeof(int)*size, cudaMemcpyHostToDevice);
    }
    // cudaMemcpy(cuda_A, A, sizeof(int)*size*size, cudaMemcpyHostToDevice);
    // cudaMemcpy(cuda_B, B, sizeof(int)*size*size, cudaMemcpyHostToDevice);

    // cout << "before launch" << flush << endl;
    start_gpu = clock();
    mul_gpu<<< grid, block >>>(cuda_A, cuda_B, cuda_C, size);
    end_gpu = clock();

    // cout << "before kernel synchronization" << flush << endl;
    cudaThreadSynchronize();

    // cout << "before fetching the result" << flush << endl;
    // cudaMemcpy(C, cuda_C, sizeof(int)*size*size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++) {
        // cout << "iteration: " << i << flush << endl;
        cudaMemcpy(C[i], &cuda_C[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost);
    }

    // cout << "before freeing memory" << flush << endl;
    cudaFree(cuda_A);
    cudaFree(cuda_B);
    cudaFree(cuda_C);

    // cout << "before exit" << flush << endl;
    end = clock();
    cout << "Running for " << (double)(end-start)/CLOCKS_PER_SEC << endl << flush;
    cout << "GPU running for " << (double)(end_gpu-start_gpu)/CLOCKS_PER_SEC << endl << flush;
}
006701ec87496298b147518220b2636054b89807.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2020 by Contributors * \file array/cuda/segment_reduce.cu * \brief Segment reduce C APIs and definitions. */ #include <dgl/array.h> #include "./segment_reduce.cuh" #include "functor.cuh" #include "./utils.h" namespace dgl { using namespace cuda; namespace aten { template <int XPU, typename IdType, int bits> void SegmentReduce(const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg) { SWITCH_BITS(bits, DType, { if (op == "sum") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Sum<IdType, DType>>( feat, offsets, out, arg); } else if (op == "max") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Max<IdType, DType>>( feat, offsets, out, arg); } else if (op == "min") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Min<IdType, DType>>( feat, offsets, out, arg); } else { LOG(FATAL) << "Not implemented"; } }); } template <int XPU, typename IdType, int bits> void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) { SWITCH_BITS(bits, DType, { cuda::BackwardSegmentCmp<IdType, DType>(feat, arg, out); }); } template void SegmentReduce<kDLGPU, int32_t, 16>( const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, 16>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int32_t, 32>( const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, 32>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int32_t, 64>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, 64>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void BackwardSegmentCmp<kDLGPU, int32_t, 16>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, 16>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int32_t, 32>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, 32>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int32_t, 64>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, 64>( NDArray feat, NDArray arg, NDArray out); } // namespace aten } // namespace dgl
006701ec87496298b147518220b2636054b89807.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/segment_reduce.cu * \brief Segment reduce C APIs and definitions. */ #include <dgl/array.h> #include "./segment_reduce.cuh" #include "./functor.cuh" #include "./utils.h" namespace dgl { using namespace cuda; namespace aten { template <int XPU, typename IdType, int bits> void SegmentReduce(const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg) { SWITCH_BITS(bits, DType, { if (op == "sum") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Sum<IdType, DType>>( feat, offsets, out, arg); } else if (op == "max") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Max<IdType, DType>>( feat, offsets, out, arg); } else if (op == "min") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Min<IdType, DType>>( feat, offsets, out, arg); } else { LOG(FATAL) << "Not implemented"; } }); } template <int XPU, typename IdType, int bits> void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) { SWITCH_BITS(bits, DType, { cuda::BackwardSegmentCmp<IdType, DType>(feat, arg, out); }); } template void SegmentReduce<kDLGPU, int32_t, 16>( const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, 16>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int32_t, 32>( const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, 32>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int32_t, 64>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, 64>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void BackwardSegmentCmp<kDLGPU, int32_t, 16>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, 16>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int32_t, 32>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, 32>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int32_t, 64>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, 64>( NDArray feat, NDArray arg, NDArray out); } // namespace aten } // namespace dgl
b09a30520a4cfba847bda65a0e5b9b8592343c92.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "update2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *alphaMinusBeta_out = NULL; hipMalloc(&alphaMinusBeta_out, XSIZE*YSIZE); const float *rho = NULL; hipMalloc(&rho, XSIZE*YSIZE); const float *yDotZ = NULL; hipMalloc(&yDotZ, XSIZE*YSIZE); const float *alpha = NULL; hipMalloc(&alpha, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( update2), dim3(gridBlock),dim3(threadBlock), 0, 0, alphaMinusBeta_out,rho,yDotZ,alpha); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( update2), dim3(gridBlock),dim3(threadBlock), 0, 0, alphaMinusBeta_out,rho,yDotZ,alpha); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( update2), dim3(gridBlock),dim3(threadBlock), 0, 0, alphaMinusBeta_out,rho,yDotZ,alpha); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b09a30520a4cfba847bda65a0e5b9b8592343c92.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "update2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *alphaMinusBeta_out = NULL; cudaMalloc(&alphaMinusBeta_out, XSIZE*YSIZE); const float *rho = NULL; cudaMalloc(&rho, XSIZE*YSIZE); const float *yDotZ = NULL; cudaMalloc(&yDotZ, XSIZE*YSIZE); const float *alpha = NULL; cudaMalloc(&alpha, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); update2<<<gridBlock,threadBlock>>>(alphaMinusBeta_out,rho,yDotZ,alpha); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { update2<<<gridBlock,threadBlock>>>(alphaMinusBeta_out,rho,yDotZ,alpha); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { update2<<<gridBlock,threadBlock>>>(alphaMinusBeta_out,rho,yDotZ,alpha); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
17b041540e09e2fae9e504b9f9ce7cd730398656.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common.h" #define NO_MAIN #include "1-baseline-host.cc" __global__ void matmul_dev(const float *A, const float *B, float *C, const int M, const int N, const int K) { int m = blockIdx.y * blockDim.y + threadIdx.y; int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x = 0.f; for (int k = 0; k < K; k++) { x += A[m * K + k] * B[k * N + n]; } C[m * N + n] = x; } } __global__ void matmul_dev_k_interchanged(const float *A, const float *B, float *C, const int M, const int N, const int K) { int k = blockIdx.y * blockDim.y + threadIdx.y; int m = blockIdx.x * blockDim.x + threadIdx.x; if (k < K && m < M) { for (int n = 0; n < N; n++) { // band conflict? C[m * N + n] += A[m * K + k] * B[k * N + n]; } } } __global__ void matmul_dev_continuous_row0(const float *A, const float *B, float *C, const int M, const int N, const int K) { const int m = blockIdx.y * blockDim.y + threadIdx.y; const int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x{}; for (int k = 0; k < K; k += 4) { // all threads in the same wrap access the same elements each time. auto *aa = reinterpret_cast<const float4 *>(&A[m * K + k]); // unroll x += aa->x * B[k * N + n]; x += aa->y * B[(k + 1) * N + n]; x += aa->z * B[(k + 2) * N + n]; x += aa->w * B[(k + 3) * N + n]; } } } __global__ void matmul_dev_continuous_row1(const float *A, const float *B, float *C, const int M, const int N, const int K) { const int m = blockIdx.y * blockDim.y + threadIdx.y; const int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x{}; for (int k = 0; k < K; k += 4) { // all threads in the same wrap access the same elements each time. float4 aa = reinterpret_cast<const float4 *>(&A[m * K + k])[0]; // unroll x += aa.x * B[k * N + n]; x += aa.y * B[(k + 1) * N + n]; x += aa.z * B[(k + 2) * N + n]; x += aa.w * B[(k + 3) * N + n]; } } } __global__ void matmul_dev_continuous_row2(const float *A, const float *B, float *C, const int M, const int N, const int K) { const int m = blockIdx.y * blockDim.y + threadIdx.y; const int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x{}; #pragma unroll for (int k = 0; k < K; k += 4) { // all threads in the same wrap access the same elements each time. float4 aa = reinterpret_cast<const float4 *>(&A[m * K + k])[0]; // unroll x += aa.x * B[k * N + n]; x += aa.y * B[(k + 1) * N + n]; x += aa.z * B[(k + 2) * N + n]; x += aa.w * B[(k + 3) * N + n]; } } } // 16 x 16 floats const int share_size = 16 * 16; __global__ void matmul_dev_tile(const float *A, const float *B, float *C, const int M, const int N, const int K) { __shared__ float a[share_size]; __shared__ float b[share_size]; // shorten for reuse const int tx = threadIdx.x; const int ty = threadIdx.y; const int dim = blockDim.x; const int m = blockIdx.y * blockDim.y + ty; // row const int n = blockIdx.x * blockDim.x + tx; // column if (m >= M || n >= N) return; float tmp{}; // i is the index of N's tile for (int i = 0; i < (N / dim); i++) { // Load a vector from A and B to a and b. a[ty * dim + tx] = A[m * N + (i * dim + tx)]; b[ty * dim + tx] = B[(i*dim+ty) * N + n]; // Ensure all the treads in the same block has finished loading data. __syncthreads(); // Calculate the result of this tile. 
for (int k = 0; k < tx; k++) { tmp += a[ty * dim + k] * b[k * dim + tx]; } __syncthreads(); } // write back the result C[m * N + n] = tmp; } __global__ void matmul_dev_tile_B_collapse(const float *A, const float *B, float *C, const int M, const int N, const int K) { __shared__ float a[share_size]; __shared__ float b[share_size]; // shorten for reuse const int tx = threadIdx.x; const int ty = threadIdx.y; const int dim = blockDim.x; const int m = blockIdx.y * blockDim.y + ty; // row const int n = blockIdx.x * blockDim.x + tx; // column if (m >= M || n >= N) return; float tmp{}; // i is the index of N's tile for (int i = 0; i < (K / dim); i++) { // Load a vector from A and B to a and b. a[ty * dim + tx] = A[m * N + (i * dim + tx)]; b[ty * dim + tx] = B[(i*dim+ty) * N + n]; // Ensure all the treads in the same block has finished loading data. __syncthreads(); // Calculate the result of this tile. for (int k = 0; k < tx; k++) { tmp += a[ty * dim + k] * b[k * dim + tx]; } __syncthreads(); } // write back the result C[m * N + n] = tmp; } // Transpose matrix from shape M x N to N x M __global__ void TransposeMatrix(float* A, const int M, const int N) { } int main() { std::vector<float> A_host, B_host, C_host; auto A = CreateDeviceVector(M * K, &A_host, true); auto B = CreateDeviceVector(K * N, &B_host, true); auto C = CreateDeviceVector(M * N, &C_host); { const int THREADS = 32; const int BLOCKS = N / THREADS; dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS, BLOCKS); hipLaunchKernelGGL(( matmul_dev), dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K); matmul_host(A_host.data(), B_host.data(), C_host.data(), M, N, K); auto res = VerifyDeviceResult(C_host.data(), C, M, N); std::cerr << "res: " << res << std::endl; for (int i = 0; i < REPEAT; i++) { hipLaunchKernelGGL(( matmul_dev), dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K); } } { const int THREADS = 32; const int BLOCKS = K / THREADS; dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS, BLOCKS); ClearDeviceVector(C, M * N); hipLaunchKernelGGL(( matmul_dev_k_interchanged), dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K); auto res = VerifyDeviceResult(C_host.data(), C, M, N); std::cerr << "res: " << res << std::endl; for (int i = 0; i < REPEAT; i++) { hipLaunchKernelGGL(( matmul_dev_k_interchanged), dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K); } } #define PROFILE_KERNEL(kernel__) \ { \ const int THREADS = 32; \ const int BLOCKS = K / THREADS; \ dim3 threads(THREADS, THREADS);\ dim3 blocks(BLOCKS, BLOCKS); \ \ ClearDeviceVector(C, M * N);\ hipLaunchKernelGGL(( matmul_dev_continuous_row1), dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K);\ auto res = VerifyDeviceResult(C_host.data(), C, M, N);\ std::cerr << "res: " << res << std::endl;\ \ for (int i = 0; i < REPEAT; i++) {\ hipLaunchKernelGGL(( kernel__) , dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K);\ }\ } PROFILE_KERNEL(matmul_dev_continuous_row0) PROFILE_KERNEL(matmul_dev_continuous_row1) PROFILE_KERNEL(matmul_dev_continuous_row2) { const int BLOCKS = 16; const int GRIDS = (N + BLOCKS - 1) / BLOCKS; const int GRIDS1 = (M + BLOCKS - 1) / BLOCKS; dim3 threads(BLOCKS, BLOCKS); // each block has 16 x 16 threads, that is same as shared_memory size dim3 blocks(GRIDS, GRIDS1); ClearDeviceVector(C, M * N); hipLaunchKernelGGL(( matmul_dev_tile), dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K); auto res = VerifyDeviceResult(C_host.data(), C, M, N); std::cerr << "res: " << res << std::endl; for (int i = 0; i < REPEAT; i++) { hipLaunchKernelGGL(( matmul_dev_tile), 
dim3(blocks), dim3(threads), 0, 0, A, B, C, M, N, K); } } DestroyDeviceVector(A); DestroyDeviceVector(B); DestroyDeviceVector(C); return 0; }
17b041540e09e2fae9e504b9f9ce7cd730398656.cu
#include "../common.h" #define NO_MAIN #include "1-baseline-host.cc" __global__ void matmul_dev(const float *A, const float *B, float *C, const int M, const int N, const int K) { int m = blockIdx.y * blockDim.y + threadIdx.y; int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x = 0.f; for (int k = 0; k < K; k++) { x += A[m * K + k] * B[k * N + n]; } C[m * N + n] = x; } } __global__ void matmul_dev_k_interchanged(const float *A, const float *B, float *C, const int M, const int N, const int K) { int k = blockIdx.y * blockDim.y + threadIdx.y; int m = blockIdx.x * blockDim.x + threadIdx.x; if (k < K && m < M) { for (int n = 0; n < N; n++) { // band conflict? C[m * N + n] += A[m * K + k] * B[k * N + n]; } } } __global__ void matmul_dev_continuous_row0(const float *A, const float *B, float *C, const int M, const int N, const int K) { const int m = blockIdx.y * blockDim.y + threadIdx.y; const int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x{}; for (int k = 0; k < K; k += 4) { // all threads in the same wrap access the same elements each time. auto *aa = reinterpret_cast<const float4 *>(&A[m * K + k]); // unroll x += aa->x * B[k * N + n]; x += aa->y * B[(k + 1) * N + n]; x += aa->z * B[(k + 2) * N + n]; x += aa->w * B[(k + 3) * N + n]; } } } __global__ void matmul_dev_continuous_row1(const float *A, const float *B, float *C, const int M, const int N, const int K) { const int m = blockIdx.y * blockDim.y + threadIdx.y; const int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x{}; for (int k = 0; k < K; k += 4) { // all threads in the same wrap access the same elements each time. float4 aa = reinterpret_cast<const float4 *>(&A[m * K + k])[0]; // unroll x += aa.x * B[k * N + n]; x += aa.y * B[(k + 1) * N + n]; x += aa.z * B[(k + 2) * N + n]; x += aa.w * B[(k + 3) * N + n]; } } } __global__ void matmul_dev_continuous_row2(const float *A, const float *B, float *C, const int M, const int N, const int K) { const int m = blockIdx.y * blockDim.y + threadIdx.y; const int n = blockIdx.x * blockDim.x + threadIdx.x; if (m < M && n < N) { float x{}; #pragma unroll for (int k = 0; k < K; k += 4) { // all threads in the same wrap access the same elements each time. float4 aa = reinterpret_cast<const float4 *>(&A[m * K + k])[0]; // unroll x += aa.x * B[k * N + n]; x += aa.y * B[(k + 1) * N + n]; x += aa.z * B[(k + 2) * N + n]; x += aa.w * B[(k + 3) * N + n]; } } } // 16 x 16 floats const int share_size = 16 * 16; __global__ void matmul_dev_tile(const float *A, const float *B, float *C, const int M, const int N, const int K) { __shared__ float a[share_size]; __shared__ float b[share_size]; // shorten for reuse const int tx = threadIdx.x; const int ty = threadIdx.y; const int dim = blockDim.x; const int m = blockIdx.y * blockDim.y + ty; // row const int n = blockIdx.x * blockDim.x + tx; // column if (m >= M || n >= N) return; float tmp{}; // i is the index of N's tile for (int i = 0; i < (N / dim); i++) { // Load a vector from A and B to a and b. a[ty * dim + tx] = A[m * N + (i * dim + tx)]; b[ty * dim + tx] = B[(i*dim+ty) * N + n]; // Ensure all the treads in the same block has finished loading data. __syncthreads(); // Calculate the result of this tile. 
for (int k = 0; k < tx; k++) { tmp += a[ty * dim + k] * b[k * dim + tx]; } __syncthreads(); } // write back the result C[m * N + n] = tmp; } __global__ void matmul_dev_tile_B_collapse(const float *A, const float *B, float *C, const int M, const int N, const int K) { __shared__ float a[share_size]; __shared__ float b[share_size]; // shorten for reuse const int tx = threadIdx.x; const int ty = threadIdx.y; const int dim = blockDim.x; const int m = blockIdx.y * blockDim.y + ty; // row const int n = blockIdx.x * blockDim.x + tx; // column if (m >= M || n >= N) return; float tmp{}; // i is the index of N's tile for (int i = 0; i < (K / dim); i++) { // Load a vector from A and B to a and b. a[ty * dim + tx] = A[m * N + (i * dim + tx)]; b[ty * dim + tx] = B[(i*dim+ty) * N + n]; // Ensure all the treads in the same block has finished loading data. __syncthreads(); // Calculate the result of this tile. for (int k = 0; k < tx; k++) { tmp += a[ty * dim + k] * b[k * dim + tx]; } __syncthreads(); } // write back the result C[m * N + n] = tmp; } // Transpose matrix from shape M x N to N x M __global__ void TransposeMatrix(float* A, const int M, const int N) { } int main() { std::vector<float> A_host, B_host, C_host; auto A = CreateDeviceVector(M * K, &A_host, true); auto B = CreateDeviceVector(K * N, &B_host, true); auto C = CreateDeviceVector(M * N, &C_host); { const int THREADS = 32; const int BLOCKS = N / THREADS; dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS, BLOCKS); matmul_dev<<<blocks, threads>>>(A, B, C, M, N, K); matmul_host(A_host.data(), B_host.data(), C_host.data(), M, N, K); auto res = VerifyDeviceResult(C_host.data(), C, M, N); std::cerr << "res: " << res << std::endl; for (int i = 0; i < REPEAT; i++) { matmul_dev<<<blocks, threads>>>(A, B, C, M, N, K); } } { const int THREADS = 32; const int BLOCKS = K / THREADS; dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS, BLOCKS); ClearDeviceVector(C, M * N); matmul_dev_k_interchanged<<<blocks, threads>>>(A, B, C, M, N, K); auto res = VerifyDeviceResult(C_host.data(), C, M, N); std::cerr << "res: " << res << std::endl; for (int i = 0; i < REPEAT; i++) { matmul_dev_k_interchanged<<<blocks, threads>>>(A, B, C, M, N, K); } } #define PROFILE_KERNEL(kernel__) \ { \ const int THREADS = 32; \ const int BLOCKS = K / THREADS; \ dim3 threads(THREADS, THREADS);\ dim3 blocks(BLOCKS, BLOCKS); \ \ ClearDeviceVector(C, M * N);\ matmul_dev_continuous_row1<<<blocks, threads>>>(A, B, C, M, N, K);\ auto res = VerifyDeviceResult(C_host.data(), C, M, N);\ std::cerr << "res: " << res << std::endl;\ \ for (int i = 0; i < REPEAT; i++) {\ kernel__ <<<blocks, threads>>>(A, B, C, M, N, K);\ }\ } PROFILE_KERNEL(matmul_dev_continuous_row0) PROFILE_KERNEL(matmul_dev_continuous_row1) PROFILE_KERNEL(matmul_dev_continuous_row2) { const int BLOCKS = 16; const int GRIDS = (N + BLOCKS - 1) / BLOCKS; const int GRIDS1 = (M + BLOCKS - 1) / BLOCKS; dim3 threads(BLOCKS, BLOCKS); // each block has 16 x 16 threads, that is same as shared_memory size dim3 blocks(GRIDS, GRIDS1); ClearDeviceVector(C, M * N); matmul_dev_tile<<<blocks, threads>>>(A, B, C, M, N, K); auto res = VerifyDeviceResult(C_host.data(), C, M, N); std::cerr << "res: " << res << std::endl; for (int i = 0; i < REPEAT; i++) { matmul_dev_tile<<<blocks, threads>>>(A, B, C, M, N, K); } } DestroyDeviceVector(A); DestroyDeviceVector(B); DestroyDeviceVector(C); return 0; }
c1fe9ccb8b673426d0f391611773e027a61c3444.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "tanhDeriv_f32.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *vector = NULL; hipMalloc(&vector, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( tanhDeriv_f32), dim3(gridBlock),dim3(threadBlock), 0, 0, vector,output,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( tanhDeriv_f32), dim3(gridBlock),dim3(threadBlock), 0, 0, vector,output,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( tanhDeriv_f32), dim3(gridBlock),dim3(threadBlock), 0, 0, vector,output,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c1fe9ccb8b673426d0f391611773e027a61c3444.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "tanhDeriv_f32.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *vector = NULL; cudaMalloc(&vector, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); tanhDeriv_f32<<<gridBlock,threadBlock>>>(vector,output,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { tanhDeriv_f32<<<gridBlock,threadBlock>>>(vector,output,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { tanhDeriv_f32<<<gridBlock,threadBlock>>>(vector,output,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6985e07bc6692d10807191308ce67e262523484a.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2022 The Microsoft DeepSpeed Team */ #include <limits> #include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include <hip/hip_runtime_api.h> #endif #include <cstdio> #include <cstdlib> #include <ctime> #define ATTN_THREADS 256 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 void CheckCudaErrorAux(const char* file, unsigned line) { hipError_t err = hipGetLastError(); if (err == hipSuccess) return; std::cerr << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; throw std::runtime_error("CUDA ERROR!!!\n"); } #define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; const __half zero_h = __float2half(0.f); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? 
__half2float(vals[data_id + 3]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); high_data[i].y = high_data[i].y + __half2float(alibi[data_id + alibi_offset + 3]); } if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); if ((data_id + 1) < sequence_length) low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); } high_data[i].y = minus_infinity; if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); } } // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); } else { low_data[i].x = minus_infinity; low_data[i].y = minus_infinity; high_data[i].x = minus_infinity; high_data[i].y = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { low_data[i].x = __expf(low_data[i].x - max_val); low_data[i].y = __expf(low_data[i].y - max_val); high_data[i].x = __expf(high_data[i].x - max_val); high_data[i].y = __expf(high_data[i].y - max_val); sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = __float2half(low_data[i].x / sum); vals[data_id + 1] = __float2half(low_data[i].y / sum); vals[data_id + 2] = __float2half(high_data[i].x / sum); vals[data_id + 3] = __float2half(high_data[i].y / sum); } else { vals[data_id] = __float2half(low_data[i].x / sum); if ((data_id + 1) < sequence_length) vals[data_id + 1] = __float2half(low_data[i].y / sum); if ((data_id + 2) < sequence_length) vals[data_id + 2] = __float2half(high_data[i].x / sum); } } } } } __global__ void attn_softmax_v2(float* vals, float* attn_mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float4 data[MAX_REG_SIZE]; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? vals[data_id + 1] : minus_infinity; data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? vals[data_id + 2] : minus_infinity; data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? 
vals[data_id + 3] : minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; data[i].w += attn_mask[data_id + mask_offset + 3]; } } else { data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && (data_id + 1) > window_stride && (data_id + 1) < sequence_length) ? (vals[data_id + 1]) : minus_infinity; data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && (data_id + 2) > window_stride && (data_id + 2) < sequence_length) ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; if ((data_id + 2) < sequence_length) data[i].z += attn_mask[data_id + mask_offset + 2]; } } max_val = (data[i].x > max_val ? data[i].x : max_val); max_val = (data[i].y > max_val ? data[i].y : max_val); max_val = (data[i].z > max_val ? data[i].z : max_val); max_val = (data[i].w > max_val ? data[i].w : max_val); } else { data[i].x = minus_infinity; data[i].y = minus_infinity; data[i].z = minus_infinity; data[i].w = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { data[i].x = __expf(data[i].x - max_val); data[i].y = __expf(data[i].y - max_val); data[i].z = __expf(data[i].z - max_val); data[i].w = __expf(data[i].w - max_val); sum += (data[i].x + data[i].y + data[i].z + data[i].w); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = data[i].x / sum; vals[data_id + 1] = data[i].y / sum; vals[data_id + 2] = data[i].z / sum; vals[data_id + 3] = data[i].w / sum; } else { vals[data_id] = data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; } } } } } template <typename T> void launch_attn_softmax_v2(T* vals, T* mask, T* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, hipStream_t stream) { int total_count = batch_size * heads * num_seq; int warp_num = ATTN_THREADS / WARP_SIZE; int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1); reduce_width = (int)pow(2.0, floor(log2((float)(reduce_width)))) * WARP_SIZE; dim3 grid_dim((total_count - 1) / (ATTN_THREADS / reduce_width) + 1); dim3 block_dim(ATTN_THREADS); const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if 
(sequence_length <= 32768) hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, vals, mask, alibi, layer_scale, triangular, recompute, local_attention, window_size, total_count, heads, sequence_length, num_seq, head_offset, mask_stride, mp_size, iterations, reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, hipStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, hipStream_t stream);
6985e07bc6692d10807191308ce67e262523484a.cu
/* Copyright 2022 The Microsoft DeepSpeed Team */ #include <limits> #include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include <cuda_profiler_api.h> #endif #include <cstdio> #include <cstdlib> #include <ctime> #define ATTN_THREADS 256 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 void CheckCudaErrorAux(const char* file, unsigned line) { cudaError_t err = cudaGetLastError(); if (err == cudaSuccess) return; std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; throw std::runtime_error("CUDA ERROR!!!\n"); } #define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; const __half zero_h = __float2half(0.f); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? 
__half2float(vals[data_id + 3]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); high_data[i].y = high_data[i].y + __half2float(alibi[data_id + alibi_offset + 3]); } if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) * layer_scale : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; if (alibi) { low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); if ((data_id + 1) < sequence_length) low_data[i].y = low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x = high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); } high_data[i].y = minus_infinity; if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); } } // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); } else { low_data[i].x = minus_infinity; low_data[i].y = minus_infinity; high_data[i].x = minus_infinity; high_data[i].y = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { low_data[i].x = __expf(low_data[i].x - max_val); low_data[i].y = __expf(low_data[i].y - max_val); high_data[i].x = __expf(high_data[i].x - max_val); high_data[i].y = __expf(high_data[i].y - max_val); sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = __float2half(low_data[i].x / sum); vals[data_id + 1] = __float2half(low_data[i].y / sum); vals[data_id + 2] = __float2half(high_data[i].x / sum); vals[data_id + 3] = __float2half(high_data[i].y / sum); } else { vals[data_id] = __float2half(low_data[i].x / sum); if ((data_id + 1) < sequence_length) vals[data_id + 1] = __float2half(low_data[i].y / sum); if ((data_id + 2) < sequence_length) vals[data_id + 2] = __float2half(high_data[i].x / sum); } } } } } __global__ void attn_softmax_v2(float* vals, float* attn_mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, int head_offset, int mask_stride, int mp_size, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float4 data[MAX_REG_SIZE]; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int batch_idx = iter_offset / (num_seq * heads); int alibi_offset = batch_idx * heads * mp_size + head_offset; int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? vals[data_id + 1] : minus_infinity; data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? vals[data_id + 2] : minus_infinity; data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? 
vals[data_id + 3] : minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; data[i].w += attn_mask[data_id + mask_offset + 3]; } } else { data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && (data_id + 1) > window_stride && (data_id + 1) < sequence_length) ? (vals[data_id + 1]) : minus_infinity; data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && (data_id + 2) > window_stride && (data_id + 2) < sequence_length) ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; if ((data_id + 2) < sequence_length) data[i].z += attn_mask[data_id + mask_offset + 2]; } } max_val = (data[i].x > max_val ? data[i].x : max_val); max_val = (data[i].y > max_val ? data[i].y : max_val); max_val = (data[i].z > max_val ? data[i].z : max_val); max_val = (data[i].w > max_val ? data[i].w : max_val); } else { data[i].x = minus_infinity; data[i].y = minus_infinity; data[i].z = minus_infinity; data[i].w = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { data[i].x = __expf(data[i].x - max_val); data[i].y = __expf(data[i].y - max_val); data[i].z = __expf(data[i].z - max_val); data[i].w = __expf(data[i].w - max_val); sum += (data[i].x + data[i].y + data[i].z + data[i].w); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = data[i].x / sum; vals[data_id + 1] = data[i].y / sum; vals[data_id + 2] = data[i].z / sum; vals[data_id + 3] = data[i].w / sum; } else { vals[data_id] = data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; } } } } } template <typename T> void launch_attn_softmax_v2(T* vals, T* mask, T* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, cudaStream_t stream) { int total_count = batch_size * heads * num_seq; int warp_num = ATTN_THREADS / WARP_SIZE; int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1); reduce_width = (int)pow(2.0, floor(log2((float)(reduce_width)))) * WARP_SIZE; dim3 grid_dim((total_count - 1) / (ATTN_THREADS / reduce_width) + 1); dim3 block_dim(ATTN_THREADS); const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if 
(sequence_length <= 32768) attn_softmax_v2<<<grid_dim, block_dim, 0, stream>>>(vals, mask, alibi, layer_scale, triangular, recompute, local_attention, window_size, total_count, heads, sequence_length, num_seq, head_offset, mask_stride, mp_size, iterations, reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, float* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, cudaStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, __half* alibi, float layer_scale, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, int head_offset, int mask_stride, int mp_size, cudaStream_t stream);
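The attn_softmax_v2 code above reduces each thread's running maximum and exponential sum across the warp with g.shfl_xor butterfly exchanges, falling back to shared memory when the reduction width exceeds one warp. Below is a minimal, self-contained sketch of that warp-level butterfly reduction written with the raw __shfl_xor_sync intrinsic instead of the cooperative-groups wrapper; the kernel name, output layout, and the -1e30f stand-in for minus infinity are invented for the example, not taken from the file above.

#include <cuda_runtime.h>

// Each full warp reduces 32 floats to their maximum; lane 0 of every warp
// writes one result, so out needs (gridDim.x * blockDim.x) / 32 elements and
// blockDim.x must be a multiple of 32.
__global__ void warp_max_sketch(const float* in, float* out, int n)
{
    int tid  = blockIdx.x * blockDim.x + threadIdx.x;
    int lane = threadIdx.x & 31;

    float v = (tid < n) ? in[tid] : -1e30f;   // -1e30f stands in for -infinity

    // Butterfly exchange: after log2(32) = 5 steps every lane holds the warp max.
    for (int offset = 1; offset < 32; offset *= 2) {
        float other = __shfl_xor_sync(0xffffffffu, v, offset);
        v = (other > v) ? other : v;
    }

    if (lane == 0) out[tid >> 5] = v;
}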
d72f00bd7a71fc8249652ce0517b4deadea4d4b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "heap_with_aux.cuh" #include "sssp_config.cuh" #define VERT(x) ((x) & 0xffffffff) #define DISTANCE(x) ((x) >> 32) #define MAKE_KEY(vert, distance) ((((unsigned long long)(distance)) << 32) + (vert)) __device__ bool update_offset(unsigned long long * p_key, int * p_offset, const void * vp_edge_list_index){ const int * edge_list_index = reinterpret_cast < const int * >(vp_edge_list_index); int edge_count = edge_list_index[VERT(*p_key) + 1] - edge_list_index[VERT(*p_key)]; if(edge_count - *p_offset > CONFIG_CHUNK_SIZE){ *p_offset += CONFIG_CHUNK_SIZE; return true; } else { return false; } } __global__ void ssspKernel(Heap_With_Aux < unsigned long long, int > * heap, const int * edge_list_index, const int * edge_dst, const int * edge_weight, int * distance, unsigned long long * inserted_nodes, volatile int * term_sig){ extern __shared__ char smem[]; int curr_smem_offset = 0; DECLARE_SMEM(int, p_size, 1); DECLARE_SMEM(int, p_total_task, 1); DECLARE_SMEM(int, p_valid_size, 1); DECLARE_SMEM(int, p_batch_offset, CONFIG_BATCH_SIZE); DECLARE_SMEM(int, p_rem_edge_count, CONFIG_BATCH_SIZE); //DECLARE_SMEM(int, p_task_psum, CONFIG_BATCH_SIZE); DECLARE_SMEM(int, p_inserted_node_count, 1); DECLARE_SMEM(int, p_should_exit, 1); ALIGN_SMEM_8; DECLARE_SMEM(unsigned long long, p_batch, CONFIG_BATCH_SIZE); DECLARE_SMEM(unsigned long long, p_valid_batch, CONFIG_BATCH_SIZE); //DECLARE_SMEM(unsigned long long, p_inserted_nodes, 0); unsigned long long *p_inserted_nodes = inserted_nodes + blockIdx.x * CONFIG_BATCH_SIZE * CONFIG_CHUNK_SIZE; do{ SINGLE_THREADED {*p_size = CONFIG_BATCH_SIZE;} __syncthreads(); heap->retrieve(p_batch, p_batch_offset, p_size, update_offset, edge_list_index, curr_smem_offset); SINGLE_THREADED { *p_total_task = 0; *p_inserted_node_count = 0; *p_valid_size = 0; } __syncthreads(); for(int i = threadIdx.x;i < *p_size;i += blockDim.x){ if(DISTANCE(p_batch[i]) <= distance[VERT(p_batch[i])]){ int new_idx = atomicAdd(p_valid_size, 1); p_valid_batch[new_idx] = p_batch[i]; int remaining_edge = edge_list_index[VERT(p_batch[i]) + 1] - edge_list_index[VERT(p_batch[i])] - p_batch_offset[i]; if(remaining_edge > CONFIG_CHUNK_SIZE) {remaining_edge = CONFIG_CHUNK_SIZE;} p_rem_edge_count[new_idx] = remaining_edge; atomicAdd(p_total_task, remaining_edge); } } __syncthreads(); if(*p_valid_size == 0) { SINGLE_THREADED { *p_should_exit = 1; term_sig[blockIdx.x] = 1; for(int i = 0;i < gridDim.x;++i){ if(term_sig[i] == 0) {*p_should_exit = 0;break;} } } __syncthreads(); if(*p_should_exit) {return;} else {continue;} } else { SINGLE_THREADED {term_sig[blockIdx.x] = 0;} } //Parallel scan from https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda batchFill(p_rem_edge_count + *p_valid_size, 0, CONFIG_BATCH_SIZE - *p_valid_size); __syncthreads(); int psum_offset = 1; for(int d = CONFIG_BATCH_SIZE >> 1;d > 0;d >>= 1){ if(threadIdx.x < d){ int ai = psum_offset * (2 * threadIdx.x + 1) - 1; int bi = psum_offset * (2 * threadIdx.x + 2) - 1; p_rem_edge_count[bi] += p_rem_edge_count[ai]; } psum_offset <<= 1; __syncthreads(); } SINGLE_THREADED {p_rem_edge_count[CONFIG_BATCH_SIZE - 1] = 0;} for(int d = 1;d < CONFIG_BATCH_SIZE;d *= 2){ __syncthreads(); psum_offset >>= 1; if(threadIdx.x < d){ int ai = psum_offset * (2 * threadIdx.x + 1) - 1; int bi = psum_offset * (2 * threadIdx.x + 2) - 1; int tmp = p_rem_edge_count[ai]; p_rem_edge_count[ai] = p_rem_edge_count[bi]; 
p_rem_edge_count[bi] += tmp; } } __syncthreads(); //batchCopy(p_task_psum, p_rem_edge_count, CONFIG_BATCH_SIZE); for(int i = threadIdx.x;i < *p_total_task;i += blockDim.x){ int vert_idx = 0; for(int j = CONFIG_BATCH_SIZE_LOG - 1;j >= 0;--j){ int new_idx = vert_idx + (1 << j); if(p_rem_edge_count[new_idx] <= i) {vert_idx = new_idx;} } int offset = i - p_rem_edge_count[vert_idx]; int src_vert = VERT(p_valid_batch[vert_idx]); int edge_id = edge_list_index[src_vert] + offset; int dst_vert = edge_dst[edge_id]; int curr_dist = DISTANCE(p_valid_batch[vert_idx]); int new_dist = curr_dist + edge_weight[edge_id]; int old_dist = atomicMin((int *)(distance + dst_vert), new_dist); if(new_dist < old_dist){ int new_idx = atomicAdd(p_inserted_node_count, 1); p_inserted_nodes[new_idx] = MAKE_KEY(dst_vert, new_dist); } } __syncthreads(); int batches = *p_inserted_node_count / CONFIG_BATCH_SIZE; for(int i = 0;i < batches;++i){ heap->insert(p_inserted_nodes + i * CONFIG_BATCH_SIZE, CONFIG_BATCH_SIZE, curr_smem_offset, distance); } int rem = *p_inserted_node_count % CONFIG_BATCH_SIZE; if(rem > 0) {heap->insert(p_inserted_nodes + batches * CONFIG_BATCH_SIZE, rem, curr_smem_offset, distance);} } while(1); } __global__ void insertInitNode(Heap_With_Aux < unsigned long long, int > * heap, unsigned long long init_node){ heap->insert(&init_node, 1, 0, NULL); }
d72f00bd7a71fc8249652ce0517b4deadea4d4b8.cu
#include "heap_with_aux.cuh" #include "sssp_config.cuh" #define VERT(x) ((x) & 0xffffffff) #define DISTANCE(x) ((x) >> 32) #define MAKE_KEY(vert, distance) ((((unsigned long long)(distance)) << 32) + (vert)) __device__ bool update_offset(unsigned long long * p_key, int * p_offset, const void * vp_edge_list_index){ const int * edge_list_index = reinterpret_cast < const int * >(vp_edge_list_index); int edge_count = edge_list_index[VERT(*p_key) + 1] - edge_list_index[VERT(*p_key)]; if(edge_count - *p_offset > CONFIG_CHUNK_SIZE){ *p_offset += CONFIG_CHUNK_SIZE; return true; } else { return false; } } __global__ void ssspKernel(Heap_With_Aux < unsigned long long, int > * heap, const int * edge_list_index, const int * edge_dst, const int * edge_weight, int * distance, unsigned long long * inserted_nodes, volatile int * term_sig){ extern __shared__ char smem[]; int curr_smem_offset = 0; DECLARE_SMEM(int, p_size, 1); DECLARE_SMEM(int, p_total_task, 1); DECLARE_SMEM(int, p_valid_size, 1); DECLARE_SMEM(int, p_batch_offset, CONFIG_BATCH_SIZE); DECLARE_SMEM(int, p_rem_edge_count, CONFIG_BATCH_SIZE); //DECLARE_SMEM(int, p_task_psum, CONFIG_BATCH_SIZE); DECLARE_SMEM(int, p_inserted_node_count, 1); DECLARE_SMEM(int, p_should_exit, 1); ALIGN_SMEM_8; DECLARE_SMEM(unsigned long long, p_batch, CONFIG_BATCH_SIZE); DECLARE_SMEM(unsigned long long, p_valid_batch, CONFIG_BATCH_SIZE); //DECLARE_SMEM(unsigned long long, p_inserted_nodes, 0); unsigned long long *p_inserted_nodes = inserted_nodes + blockIdx.x * CONFIG_BATCH_SIZE * CONFIG_CHUNK_SIZE; do{ SINGLE_THREADED {*p_size = CONFIG_BATCH_SIZE;} __syncthreads(); heap->retrieve(p_batch, p_batch_offset, p_size, update_offset, edge_list_index, curr_smem_offset); SINGLE_THREADED { *p_total_task = 0; *p_inserted_node_count = 0; *p_valid_size = 0; } __syncthreads(); for(int i = threadIdx.x;i < *p_size;i += blockDim.x){ if(DISTANCE(p_batch[i]) <= distance[VERT(p_batch[i])]){ int new_idx = atomicAdd(p_valid_size, 1); p_valid_batch[new_idx] = p_batch[i]; int remaining_edge = edge_list_index[VERT(p_batch[i]) + 1] - edge_list_index[VERT(p_batch[i])] - p_batch_offset[i]; if(remaining_edge > CONFIG_CHUNK_SIZE) {remaining_edge = CONFIG_CHUNK_SIZE;} p_rem_edge_count[new_idx] = remaining_edge; atomicAdd(p_total_task, remaining_edge); } } __syncthreads(); if(*p_valid_size == 0) { SINGLE_THREADED { *p_should_exit = 1; term_sig[blockIdx.x] = 1; for(int i = 0;i < gridDim.x;++i){ if(term_sig[i] == 0) {*p_should_exit = 0;break;} } } __syncthreads(); if(*p_should_exit) {return;} else {continue;} } else { SINGLE_THREADED {term_sig[blockIdx.x] = 0;} } //Parallel scan from https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda batchFill(p_rem_edge_count + *p_valid_size, 0, CONFIG_BATCH_SIZE - *p_valid_size); __syncthreads(); int psum_offset = 1; for(int d = CONFIG_BATCH_SIZE >> 1;d > 0;d >>= 1){ if(threadIdx.x < d){ int ai = psum_offset * (2 * threadIdx.x + 1) - 1; int bi = psum_offset * (2 * threadIdx.x + 2) - 1; p_rem_edge_count[bi] += p_rem_edge_count[ai]; } psum_offset <<= 1; __syncthreads(); } SINGLE_THREADED {p_rem_edge_count[CONFIG_BATCH_SIZE - 1] = 0;} for(int d = 1;d < CONFIG_BATCH_SIZE;d *= 2){ __syncthreads(); psum_offset >>= 1; if(threadIdx.x < d){ int ai = psum_offset * (2 * threadIdx.x + 1) - 1; int bi = psum_offset * (2 * threadIdx.x + 2) - 1; int tmp = p_rem_edge_count[ai]; p_rem_edge_count[ai] = p_rem_edge_count[bi]; p_rem_edge_count[bi] += tmp; } } __syncthreads(); //batchCopy(p_task_psum, p_rem_edge_count, 
CONFIG_BATCH_SIZE); for(int i = threadIdx.x;i < *p_total_task;i += blockDim.x){ int vert_idx = 0; for(int j = CONFIG_BATCH_SIZE_LOG - 1;j >= 0;--j){ int new_idx = vert_idx + (1 << j); if(p_rem_edge_count[new_idx] <= i) {vert_idx = new_idx;} } int offset = i - p_rem_edge_count[vert_idx]; int src_vert = VERT(p_valid_batch[vert_idx]); int edge_id = edge_list_index[src_vert] + offset; int dst_vert = edge_dst[edge_id]; int curr_dist = DISTANCE(p_valid_batch[vert_idx]); int new_dist = curr_dist + edge_weight[edge_id]; int old_dist = atomicMin((int *)(distance + dst_vert), new_dist); if(new_dist < old_dist){ int new_idx = atomicAdd(p_inserted_node_count, 1); p_inserted_nodes[new_idx] = MAKE_KEY(dst_vert, new_dist); } } __syncthreads(); int batches = *p_inserted_node_count / CONFIG_BATCH_SIZE; for(int i = 0;i < batches;++i){ heap->insert(p_inserted_nodes + i * CONFIG_BATCH_SIZE, CONFIG_BATCH_SIZE, curr_smem_offset, distance); } int rem = *p_inserted_node_count % CONFIG_BATCH_SIZE; if(rem > 0) {heap->insert(p_inserted_nodes + batches * CONFIG_BATCH_SIZE, rem, curr_smem_offset, distance);} } while(1); } __global__ void insertInitNode(Heap_With_Aux < unsigned long long, int > * heap, unsigned long long init_node){ heap->insert(&init_node, 1, 0, NULL); }
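ssspKernel above compacts a variable number of edges per vertex using the work-efficient exclusive scan it cites from GPU Gems 3, chapter 39, run in shared memory over CONFIG_BATCH_SIZE counters. The sketch below isolates that up-sweep/down-sweep pattern as a standalone single-block kernel for a power-of-two element count; the template parameter, kernel name, and launch shape are illustrative and not part of the file above.

// Work-efficient (Blelloch) exclusive scan of N ints by a single block,
// with N a power of two and blockDim.x == N / 2, e.g.:
//   exclusive_scan_sketch<256><<<1, 128>>>(d_in, d_out);
template <int N>
__global__ void exclusive_scan_sketch(const int* in, int* out)
{
    __shared__ int temp[N];
    int tid = threadIdx.x;

    temp[2 * tid]     = in[2 * tid];
    temp[2 * tid + 1] = in[2 * tid + 1];

    // Up-sweep: build a reduction tree of partial sums in place.
    int offset = 1;
    for (int d = N >> 1; d > 0; d >>= 1) {
        __syncthreads();
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset <<= 1;
    }

    // Down-sweep: clear the root, then push prefix sums back down the tree.
    if (tid == 0) temp[N - 1] = 0;
    for (int d = 1; d < N; d <<= 1) {
        offset >>= 1;
        __syncthreads();
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            int t  = temp[ai];
            temp[ai]  = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    out[2 * tid]     = temp[2 * tid];
    out[2 * tid + 1] = temp[2 * tid + 1];
}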
cbad49afc8fe3d2c2991a021b6422b2858583079.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cassert>

// #include "sixtracklib/sixtracklib.h"

#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>

// extern void run(double **indata, double **outdata, int npart );

__global__ void test( double* x, int npart )
{
    if( npart > 0 )
    {
        printf( "numbers : %.8f\r\n", x[ 0 ] );
        printf( "numbers : %.8f\r\n", x[ 1 ] );
        printf( "numbers : %.8f\r\n", x[ 2 ] );
        printf( "numbers : %.8f\r\n", x[ 3 ] );
        printf( "numbers : %.8f\r\n", x[ 4 ] );
        printf( "numbers : %.8f\r\n", x[ 5 ] );
        printf( "numbers : %.8f\r\n", x[ 6 ] );
    }

    return;
}

int main()
{
    int npart = 10;

    double* host_particle_buffer = 0;
    double* dev_particle_buffer  = 0;

    hipError_t err = hipSuccess;

    unsigned int device_flags = 0u;
    hipGetDeviceFlags( &device_flags );

    if( ( device_flags & hipDeviceMapHost ) != hipDeviceMapHost )
    {
        printf( "pinned memory not available with the "
                "cuda device -> aborting\r\n" );

        return 0;
    }

    err = hipHostMalloc( ( void** )&host_particle_buffer,
                         npart * 240u, hipHostMallocMapped );

    assert( err == hipSuccess );
    assert( host_particle_buffer != 0 );

    err = hipHostGetDevicePointer(
        ( void** )&dev_particle_buffer, host_particle_buffer, 0u );

    assert( err == hipSuccess );

    if( npart > 0 )
    {
        host_particle_buffer[ 0 ] = 1.2345;
        host_particle_buffer[ 1 ] = 2.2345;
        host_particle_buffer[ 2 ] = 3.2345;
        host_particle_buffer[ 3 ] = 4.2345;
        host_particle_buffer[ 4 ] = 5.2345;
        host_particle_buffer[ 5 ] = 6.2345;
        host_particle_buffer[ 6 ] = 7.2345;
    }

    hipLaunchKernelGGL(( test), dim3(1), dim3(1) , 0, 0, dev_particle_buffer, npart );

    err = hipHostFree( host_particle_buffer );
    host_particle_buffer = 0;

    assert( err == hipSuccess );

    return 0;
}

/* end: studies/study10/run_sample_fodo.c */
cbad49afc8fe3d2c2991a021b6422b2858583079.cu
#include <cstdio>
#include <cassert>

// #include "sixtracklib/sixtracklib.h"

#include <cuda_runtime_api.h>
#include <cuda.h>

// extern void run(double **indata, double **outdata, int npart );

__global__ void test( double* x, int npart )
{
    if( npart > 0 )
    {
        printf( "numbers : %.8f\r\n", x[ 0 ] );
        printf( "numbers : %.8f\r\n", x[ 1 ] );
        printf( "numbers : %.8f\r\n", x[ 2 ] );
        printf( "numbers : %.8f\r\n", x[ 3 ] );
        printf( "numbers : %.8f\r\n", x[ 4 ] );
        printf( "numbers : %.8f\r\n", x[ 5 ] );
        printf( "numbers : %.8f\r\n", x[ 6 ] );
    }

    return;
}

int main()
{
    int npart = 10;

    double* host_particle_buffer = 0;
    double* dev_particle_buffer  = 0;

    cudaError_t err = cudaSuccess;

    unsigned int device_flags = 0u;
    cudaGetDeviceFlags( &device_flags );

    if( ( device_flags & cudaDeviceMapHost ) != cudaDeviceMapHost )
    {
        printf( "pinned memory not available with the "
                "cuda device -> aborting\r\n" );

        return 0;
    }

    err = cudaHostAlloc( ( void** )&host_particle_buffer,
                         npart * 240u, cudaHostAllocMapped );

    assert( err == cudaSuccess );
    assert( host_particle_buffer != 0 );

    err = cudaHostGetDevicePointer(
        ( void** )&dev_particle_buffer, host_particle_buffer, 0u );

    assert( err == cudaSuccess );

    if( npart > 0 )
    {
        host_particle_buffer[ 0 ] = 1.2345;
        host_particle_buffer[ 1 ] = 2.2345;
        host_particle_buffer[ 2 ] = 3.2345;
        host_particle_buffer[ 3 ] = 4.2345;
        host_particle_buffer[ 4 ] = 5.2345;
        host_particle_buffer[ 5 ] = 6.2345;
        host_particle_buffer[ 6 ] = 7.2345;
    }

    test<<< 1, 1 >>>( dev_particle_buffer, npart );

    err = cudaFreeHost( host_particle_buffer );
    host_particle_buffer = 0;

    assert( err == cudaSuccess );

    return 0;
}

/* end: studies/study10/run_sample_fodo.c */
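The run_sample_fodo pair above demonstrates mapped (zero-copy) pinned host memory: cudaHostAlloc with cudaHostAllocMapped plus cudaHostGetDevicePointer on the CUDA side, which hipify rewrites to hipHostMalloc and hipHostGetDevicePointer. The sketch below shows the same idiom with one extra step the print-only test does not need: a cudaDeviceSynchronize before the host reads what the kernel wrote, since kernel launches are asynchronous. It assumes a 64-bit platform with unified virtual addressing, where mapped allocations work without setting device flags first; the kernel, names, and sizes are invented.

#include <cstdio>
#include <cassert>
#include <cuda_runtime.h>

__global__ void scale_in_place( double* x, int n )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < n ) x[ i ] *= 2.0;
}

int main()
{
    int n = 10;
    double* host_buf = 0;
    double* dev_view = 0;

    cudaError_t err = cudaHostAlloc( ( void** )&host_buf,
                                     n * sizeof( double ), cudaHostAllocMapped );
    assert( err == cudaSuccess );

    err = cudaHostGetDevicePointer( ( void** )&dev_view, host_buf, 0 );
    assert( err == cudaSuccess );

    for( int i = 0; i < n; ++i ) host_buf[ i ] = i + 0.5;

    // The kernel dereferences dev_view, which aliases the pinned host buffer.
    scale_in_place<<< 1, 32 >>>( dev_view, n );

    // Launches are asynchronous: synchronize before reading results on the host.
    err = cudaDeviceSynchronize();
    assert( err == cudaSuccess );

    for( int i = 0; i < n; ++i ) printf( "%d -> %.2f\n", i, host_buf[ i ] );

    err = cudaFreeHost( host_buf );
    assert( err == cudaSuccess );

    return 0;
}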
17bf135479f6e4132d7b250ad6736f2b166cfab5.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdlib> #include <iostream> #include <string> #include <vector> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 42 * 4 + 1, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int)ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *)&n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *)&n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); int main(int argc, char *argv[]) { float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 224, cols = 224, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors( hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; 
} for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // VGG vector<LayerSpecifier> layer_specifier; { ConvDescriptor part0_conv0; part0_conv0.initializeValues(3, 64, 3, 3, 224, 224, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part0_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part0_conv1; part0_conv1.initializeValues(64, 64, 3, 3, 224, 224, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part0_conv1; layer_specifier.push_back(temp); } { PoolingDescriptor pool0; pool0.initializeValues(64, 2, 2, 224, 224, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool0; layer_specifier.push_back(temp); } { ConvDescriptor part1_conv0; part1_conv0.initializeValues(64, 128, 3, 3, 112, 112, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part1_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part1_conv1; part1_conv1.initializeValues(128, 128, 3, 3, 112, 112, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part1_conv1; layer_specifier.push_back(temp); } { PoolingDescriptor pool1; pool1.initializeValues(128, 2, 2, 112, 112, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool1; layer_specifier.push_back(temp); } { ConvDescriptor part2_conv0; part2_conv0.initializeValues(128, 256, 3, 3, 56, 56, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part2_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part2_conv1; part2_conv1.initializeValues(256, 256, 3, 3, 56, 56, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part2_conv1; layer_specifier.push_back(temp); } { ConvDescriptor part2_conv2; part2_conv2.initializeValues(256, 256, 3, 3, 56, 56, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part2_conv2; layer_specifier.push_back(temp); } { PoolingDescriptor pool2; pool2.initializeValues(256, 2, 2, 56, 56, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool2; layer_specifier.push_back(temp); } { ConvDescriptor part3_conv0; part3_conv0.initializeValues(256, 512, 3, 3, 28, 28, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part3_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part3_conv1; part3_conv1.initializeValues(512, 512, 3, 3, 28, 28, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part3_conv1; layer_specifier.push_back(temp); } { ConvDescriptor part3_conv2; part3_conv2.initializeValues(512, 512, 3, 3, 28, 28, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part3_conv2; layer_specifier.push_back(temp); } { PoolingDescriptor pool3; pool3.initializeValues(512, 2, 2, 28, 28, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool3; layer_specifier.push_back(temp); } { ConvDescriptor part4_conv0; 
part4_conv0.initializeValues(512, 512, 3, 3, 14, 14, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part4_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part4_conv1; part4_conv1.initializeValues(512, 512, 3, 3, 14, 14, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part4_conv1; layer_specifier.push_back(temp); } { ConvDescriptor part4_conv2; part4_conv2.initializeValues(512, 512, 3, 3, 14, 14, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part4_conv2; layer_specifier.push_back(temp); } { PoolingDescriptor pool3; pool3.initializeValues(512, 2, 2, 14, 14, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool3; layer_specifier.push_back(temp); } { FCDescriptor part5_fc0; part5_fc0.initializeValues(7 * 7 * 512, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = part5_fc0; layer_specifier.push_back(temp); } { FCDescriptor part5_fc1; part5_fc1.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = part5_fc1; layer_specifier.push_back(temp); } { FCDescriptor part5_fc2; part5_fc2.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = part5_fc2; layer_specifier.push_back(temp); } { SoftmaxDescriptor s_max; s_max.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = s_max; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. 
using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 42; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 40, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); }
17bf135479f6e4132d7b250ad6736f2b166cfab5.cu
#include <cmath> #include <cstdlib> #include <iostream> #include <string> #include <vector> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 42 * 4 + 1, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int)ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *)&n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *)&n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *)&magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *)&n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); int main(int argc, char *argv[]) { float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 224, cols = 224, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors( cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j 
< input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // VGG vector<LayerSpecifier> layer_specifier; { ConvDescriptor part0_conv0; part0_conv0.initializeValues(3, 64, 3, 3, 224, 224, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part0_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part0_conv1; part0_conv1.initializeValues(64, 64, 3, 3, 224, 224, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part0_conv1; layer_specifier.push_back(temp); } { PoolingDescriptor pool0; pool0.initializeValues(64, 2, 2, 224, 224, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool0; layer_specifier.push_back(temp); } { ConvDescriptor part1_conv0; part1_conv0.initializeValues(64, 128, 3, 3, 112, 112, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part1_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part1_conv1; part1_conv1.initializeValues(128, 128, 3, 3, 112, 112, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part1_conv1; layer_specifier.push_back(temp); } { PoolingDescriptor pool1; pool1.initializeValues(128, 2, 2, 112, 112, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool1; layer_specifier.push_back(temp); } { ConvDescriptor part2_conv0; part2_conv0.initializeValues(128, 256, 3, 3, 56, 56, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part2_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part2_conv1; part2_conv1.initializeValues(256, 256, 3, 3, 56, 56, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part2_conv1; layer_specifier.push_back(temp); } { ConvDescriptor part2_conv2; part2_conv2.initializeValues(256, 256, 3, 3, 56, 56, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part2_conv2; layer_specifier.push_back(temp); } { PoolingDescriptor pool2; pool2.initializeValues(256, 2, 2, 56, 56, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool2; layer_specifier.push_back(temp); } { ConvDescriptor part3_conv0; part3_conv0.initializeValues(256, 512, 3, 3, 28, 28, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part3_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part3_conv1; part3_conv1.initializeValues(512, 512, 3, 3, 28, 28, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part3_conv1; layer_specifier.push_back(temp); } { ConvDescriptor part3_conv2; part3_conv2.initializeValues(512, 512, 3, 3, 28, 28, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part3_conv2; layer_specifier.push_back(temp); } { PoolingDescriptor pool3; pool3.initializeValues(512, 2, 2, 28, 28, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool3; layer_specifier.push_back(temp); } { ConvDescriptor part4_conv0; part4_conv0.initializeValues(512, 512, 3, 3, 14, 14, 1, 1, 1, 1, 
RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part4_conv0; layer_specifier.push_back(temp); } { ConvDescriptor part4_conv1; part4_conv1.initializeValues(512, 512, 3, 3, 14, 14, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part4_conv1; layer_specifier.push_back(temp); } { ConvDescriptor part4_conv2; part4_conv2.initializeValues(512, 512, 3, 3, 14, 14, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = part4_conv2; layer_specifier.push_back(temp); } { PoolingDescriptor pool3; pool3.initializeValues(512, 2, 2, 14, 14, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = pool3; layer_specifier.push_back(temp); } { FCDescriptor part5_fc0; part5_fc0.initializeValues(7 * 7 * 512, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = part5_fc0; layer_specifier.push_back(temp); } { FCDescriptor part5_fc1; part5_fc1.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = part5_fc1; layer_specifier.push_back(temp); } { FCDescriptor part5_fc2; part5_fc2.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = part5_fc2; layer_specifier.push_back(temp); } { SoftmaxDescriptor s_max; s_max.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = s_max; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. 
using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 42; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 40, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); }
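The VGG benchmark above gathers per-iteration training times through Solver::getTrainTime and reports their mean and standard deviation in printTimes. How the solver measures each iteration is not visible here; the sketch below only illustrates the common CUDA-event way of timing GPU work, offered as a plausible building block rather than the method this codebase actually uses. The dummy kernel, sizes, and names are invented.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_work(float* x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 0.5f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float* d_x = 0;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Events are recorded on the same (default) stream as the kernel, so the
    // elapsed time is measured on the GPU timeline, free of host-side jitter.
    cudaEventRecord(start);
    busy_work<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("iteration time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}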
41aa4ade6cac5124cf804eee94173ba4266917f6.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>

int main(void)
{
    hipDeviceProp_t prop;
    int dev;

    hipGetDevice (&dev);
    printf ("ID of current CUDA device: %d\n", dev);

    memset (&prop, 0, sizeof(hipDeviceProp_t));
    prop.major = 1;
    prop.minor = 3;
    hipChooseDevice (&dev, &prop);
    printf ("ID of CUDA device closest to revision 1.3: %d\n", dev);

    hipSetDevice (dev);

    return 0;
}
41aa4ade6cac5124cf804eee94173ba4266917f6.cu
#include <stdio.h>

int main(void)
{
    cudaDeviceProp prop;
    int dev;

    cudaGetDevice (&dev);
    printf ("ID of current CUDA device: %d\n", dev);

    memset (&prop, 0, sizeof(cudaDeviceProp));
    prop.major = 1;
    prop.minor = 3;
    cudaChooseDevice (&dev, &prop);
    printf ("ID of CUDA device closest to revision 1.3: %d\n", dev);

    cudaSetDevice (dev);

    return 0;
}
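The pair above asks the runtime for the device closest to compute capability 1.3 with cudaChooseDevice (hipChooseDevice after hipify) and binds it with cudaSetDevice. The sketch below does the same selection explicitly by enumerating devices with cudaGetDeviceCount and cudaGetDeviceProperties; it is an alternative idiom for comparison, not code from this dataset.

#include <stdio.h>
#include <cuda_runtime.h>

int main(void)
{
    int count = 0;
    cudaGetDeviceCount(&count);

    // Enumerate devices and pick the first one with compute capability >= 1.3,
    // making the selection criterion explicit instead of relying on the
    // "closest match" heuristic of cudaChooseDevice.
    int chosen = -1;
    for (int dev = 0; dev < count; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("device %d: %s (compute %d.%d)\n",
               dev, prop.name, prop.major, prop.minor);
        if (chosen < 0 && (prop.major > 1 || (prop.major == 1 && prop.minor >= 3)))
            chosen = dev;
    }

    if (chosen >= 0) {
        cudaSetDevice(chosen);
        printf("selected device %d\n", chosen);
    }

    return 0;
}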
fe254e46dbde408950f47987b165da46b4642920.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" #include <iostream> namespace pcl { namespace device { //////////////////////////////////////////////////////////////////////////////////////// ///// Full Volume Scan6 enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 6, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y, MAX_LOCAL_POINTS = 3 }; __device__ int global_count = 0; __device__ int output_xyz_count = 0; // ************************************************* __device__ unsigned int blocks_done = 0; __shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS]; __shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS]; __shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS]; __shared__ float storage_I[CTA_SIZE * MAX_LOCAL_POINTS]; struct FullScan6 { PtrStep<short2> volume; float3 cell_size; mutable PtrSz<PointType> output; mutable PtrSz<PointType> output_xyz; mutable PtrSz<float> output_intensity; __device__ __forceinline__ float fetch (pcl::gpu::tsdf_buffer buffer, int x, int y, int z, int& weight) const { float tsdf; const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]); short2* pos = const_cast<short2*> (tmp_pos); shift_tsdf_pointer (&pos, buffer); unpack_tsdf (*pos, tsdf, weight); return tsdf; } __device__ __forceinline__ float fetch (int x, int y, int z, int& weight) const { float tsdf; unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight); return tsdf; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; #if __CUDA_ARCH__ < 200 __shared__ int cta_buffer[CTA_SIZE]; #endif #if __CUDA_ARCH__ >= 120 if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y)) return; #else if (Emulation::All(x >= VOLUME_X, cta_buffer) || Emulation::All(y >= VOLUME_Y, cta_buffer)) return; #endif float3 V; V.x = (x + 0.5f) * cell_size.x; V.y = (y + 0.5f) 
* cell_size.y; int ftid = Block::flattenedThreadId (); for (int z = 0; z < VOLUME_Z - 1; ++z) { float3 points[MAX_LOCAL_POINTS]; int local_count = 0; if (x < VOLUME_X && y < VOLUME_Y) { int W; float F = fetch (x, y, z, W); if (W != 0 && F != 1.f) { V.z = (z + 0.5f) * cell_size.z; // process dx if (x + 1 < VOLUME_X) { int Wn; float Fn = fetch (x + 1, y, z, Wn); if (Wn != 0 && Fn != 1.f) if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) ) { float3 p; p.y = V.y; p.z = V.z; float Vnx = V.x + cell_size.x; float d_inv = 1.f / (fabs (F) + fabs (Fn)); p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv; points[local_count++] = p; } } /* if (x + 1 < VOLUME_X) */ // process dy if (y + 1 < VOLUME_Y) { int Wn; float Fn = fetch (x, y + 1, z, Wn); if (Wn != 0 && Fn != 1.f) if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) ) { float3 p; p.x = V.x; p.z = V.z; float Vny = V.y + cell_size.y; float d_inv = 1.f / (fabs (F) + fabs (Fn)); p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv; points[local_count++] = p; } } /* if (y + 1 < VOLUME_Y) */ // process dz // if (z + 1 < VOLUME_Z) // guaranteed by loop { int Wn; float Fn = fetch (x, y, z + 1, Wn); if (Wn != 0 && Fn != 1.f) if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) { float3 p; p.x = V.x; p.y = V.y; float Vnz = V.z + cell_size.z; float d_inv = 1.f / (fabs (F) + fabs (Fn)); p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv; points[local_count++] = p; } }/* if (z + 1 < VOLUME_Z) */ }/* if (W != 0 && F != 1.f) */ }/* if (x < VOLUME_X && y < VOLUME_Y) */ #if __CUDA_ARCH__ >= 200 //not we fulfilled points array at current iteration int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2)); #else int tid = Block::flattenedThreadId (); cta_buffer[tid] = local_count; int total_warp = Emulation::warp_reduce (cta_buffer, tid); #endif if (total_warp > 0) { int lane = Warp::laneId (); int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; volatile int* cta_buffer = (int*)(storage_X + storage_index); cta_buffer[lane] = local_count; int offset = scan_warp<exclusive>(cta_buffer, lane); if (lane == 0) { int old_global_count = atomicAdd (&global_count, total_warp); cta_buffer[0] = old_global_count; } int old_global_count = cta_buffer[0]; for (int l = 0; l < local_count; ++l) { storage_X[storage_index + offset + l] = points[l].x; storage_Y[storage_index + offset + l] = points[l].y; storage_Z[storage_index + offset + l] = points[l].z; } PointType *pos = output_xyz.data + old_global_count + lane; for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE) { float x = storage_X[storage_index + idx]; float y = storage_Y[storage_index + idx]; float z = storage_Z[storage_index + idx]; store_point_type (x, y, z, pos); } bool full = (old_global_count + total_warp) >= output_xyz.size; if (full) break; } }/* for(int z = 0; z < VOLUME_Z - 1; ++z) */ /////////////////////////// // Prepare for future scans if (ftid == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc (&blocks_done, total_blocks); // Last block if (value == total_blocks - 1) { output_xyz_count = min ((int)output_xyz.size, global_count); blocks_done = 0; global_count = 0; } } } /* operator() */ // OPERATOR USED BY EXTRACT_SLICE_AS_CLOUD. // This operator extracts the cloud as TSDF values and X,Y,Z indices. // The previous operator generates a regular point cloud in meters. // This one generates a TSDF Point Cloud in grid indices. 
__device__ __forceinline__ void operator () (pcl::gpu::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; int ftid = Block::flattenedThreadId (); int minimum_Z = 0; int maximum_Z = VOLUME_Z; for (int z = minimum_Z; z < maximum_Z; ++z) { // The black zone is the name given to the subvolume within the TSDF Volume grid that is shifted out. // In other words, the set of points in the TSDF grid that we want to extract in order to add it to the world model being built in CPU. bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ; float4 points[MAX_LOCAL_POINTS]; int local_count = 0; if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone) { int W; float F = fetch (buffer, x, y, z, W); if (W != 0.0f && F != 1.f && F < 0.98 && F != 0.0f && F > -1.0f) { float4 p; p.x = x; p.y = y; p.z = z; p.w = F; points[local_count++] = p; } }/* if (x < VOLUME_X && y < VOLUME_Y) */ // local_count counts the number of zero crossing for the current thread. Now we need to merge this knowledge with the other threads // not we fulfilled points array at current iteration int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2)); if (total_warp > 0) ///more than 0 zero-crossings { int lane = Warp::laneId (); ///index of thread within warp [0-31] int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; // Pointer to the beginning of the current warp buffer volatile int* cta_buffer = (int*)(storage_X + storage_index); // Compute offset of current warp // Call in place scanning (see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html) cta_buffer[lane] = local_count; int offset = scan_warp<exclusive>(cta_buffer, lane); //How many crossings did we have before index "lane" ? 
// We want to do only 1 operation per warp (not thread) -> because it is faster if (lane == 0) { int old_global_count = atomicAdd (&global_count, total_warp); ///We use atomicAdd, so that threads do not collide cta_buffer[0] = old_global_count; } int old_global_count = cta_buffer[0]; // Perform compaction (dump all current crossings) for (int l = 0; l < local_count; ++l) { storage_X[storage_index + offset + l] = points[l].x;// x coordinates of the points we found in STORAGE_X storage_Y[storage_index + offset + l] = points[l].y;// y coordinates of the points we found in STORAGE_Y storage_Z[storage_index + offset + l] = points[l].z;// z coordinates of the points we found in STORAGE_Z storage_I[storage_index + offset + l] = points[l].w;// Intensity values of the points we found in STORAGE_I } // Retrieve Zero-crossings as 3D points int offset_storage = old_global_count + lane; for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE) { float x = storage_X[storage_index + idx]; float y = storage_Y[storage_index + idx]; float z = storage_Z[storage_index + idx]; float i = storage_I[storage_index + idx]; store_point_intensity (x, y, z, i, output_xyz.data, output_intensity.data, offset_storage); } // Sanity check to make sure our output_xyz buffer is not full already bool full = (old_global_count + total_warp) >= output_xyz.size; if (full) break; } } /* for(int z = 0; z < VOLUME_Z - 1; ++z) */ /////////////////////////// // Prepare for future scans if (ftid == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc (&blocks_done, total_blocks); // Last block if (value == total_blocks - 1) { output_xyz_count = min ((int)output_xyz.size, global_count); blocks_done = 0; global_count = 0; } } } /* operator() */ __device__ __forceinline__ void store_point_type (float x, float y, float z, float4* ptr) const { *ptr = make_float4 (x, y, z, 0); } //INLINE FUNCTION THAT STORES XYZ AND INTENSITY VALUES IN 2 SEPARATE DeviceArrays. 
// ptr_xyz: pointer to the BEGINNING of the XYZ deviceArray // ptr_instensity: pointer to the BEGINNING of the Intensity deviceArray // offset: offset to apply to both XYZ and Intensity __device__ __forceinline__ void store_point_intensity (float x, float y, float z, float i, float4* ptr_xyz, float* ptr_intensity, int offset) const { *(ptr_xyz + offset) = make_float4 (x, y, z, 0); *(ptr_intensity + offset) = i; } __device__ __forceinline__ void store_point_type (float x, float y, float z, float3* ptr) const { *ptr = make_float3 (x, y, z); } }; __global__ void extractKernel (const FullScan6 fs) { fs (); } __global__ void extractSliceKernel (const FullScan6 fs, pcl::gpu::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) { fs (buffer, minBounds, maxBounds); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output_xyz) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / VOLUME_X; fs.cell_size.y = volume_size.y / VOLUME_Y; fs.cell_size.z = volume_size.z / VOLUME_Z; fs.output_xyz = output_xyz; dim3 block (CTA_SIZE_X, CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); hipLaunchKernelGGL(( extractKernel), dim3(grid), dim3(block), 0, 0, fs); cudaSafeCall ( hipGetLastError () ); cudaSafeCall ( hipDeviceSynchronize () ); int size; cudaSafeCall ( hipMemcpyFromSymbol (&size, output_xyz_count, sizeof (size)) ); // cudaSafeCall ( hipMemcpyFromSymbol (&size, "output_xyz_count", sizeof (size)) ); return ((size_t)size); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t pcl::device::extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::tsdf_buffer* buffer, const int shiftX, const int shiftY, const int shiftZ, PtrSz<PointType> output_xyz, PtrSz<float> output_intensities) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / buffer->voxels_size.x; fs.cell_size.y = volume_size.y / buffer->voxels_size.y; fs.cell_size.z = volume_size.z / buffer->voxels_size.z; fs.output_xyz = output_xyz; fs.output_intensity = output_intensities; dim3 block (CTA_SIZE_X, CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); //Compute slice bounds int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int newZ = buffer->origin_GRID.z + shiftZ; int3 minBounds, maxBounds; //X if (newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x - 1; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x - 1; } if (minBounds.x > maxBounds.x) std::swap (minBounds.x, maxBounds.x); //Y if (newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y - 1; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y - 1; } if(minBounds.y > maxBounds.y) std::swap (minBounds.y, maxBounds.y); //Z if (newZ >= 0) { minBounds.z = buffer->origin_GRID.z; maxBounds.z = newZ; } else { minBounds.z = newZ + buffer->voxels_size.z - 1; maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z - 1; } if (minBounds.z > maxBounds.z) std::swap(minBounds.z, maxBounds.z); minBounds.x -= buffer->origin_GRID.x; maxBounds.x -= buffer->origin_GRID.x; minBounds.y -= buffer->origin_GRID.y; maxBounds.y -= buffer->origin_GRID.y; minBounds.z 
-= buffer->origin_GRID.z; maxBounds.z -= buffer->origin_GRID.z; if (minBounds.x < 0) // We are shifting Left { minBounds.x += buffer->voxels_size.x; maxBounds.x += (buffer->voxels_size.x); } if (minBounds.y < 0) // We are shifting up { minBounds.y += buffer->voxels_size.y; maxBounds.y += (buffer->voxels_size.y); } if (minBounds.z < 0) // We are shifting back { minBounds.z += buffer->voxels_size.z; maxBounds.z += buffer->voxels_size.z; } // Extraction call hipLaunchKernelGGL(( extractSliceKernel), dim3(grid), dim3(block), 0, 0, fs, *buffer, minBounds, maxBounds); cudaSafeCall ( hipGetLastError () ); cudaSafeCall ( hipDeviceSynchronize () ); int size; cudaSafeCall ( hipMemcpyFromSymbol (&size, output_xyz_count, sizeof(size)) ); return (size_t)size; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { template<typename NormalType> struct ExtractNormals { float3 cell_size; PtrStep<short2> volume; PtrSz<PointType> points; mutable NormalType* output; __device__ __forceinline__ float readTsdf (int x, int y, int z) const { return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]); } __device__ __forceinline__ float3 fetchPoint (int idx) const { PointType p = points.data[idx]; return make_float3 (p.x, p.y, p.z); } __device__ __forceinline__ void storeNormal (int idx, float3 normal) const { NormalType n; n.x = normal.x; n.y = normal.y; n.z = normal.z; output[idx] = n; } __device__ __forceinline__ int3 getVoxel (const float3& point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ void operator () () const { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= points.size) return; const float qnan = numeric_limits<float>::quiet_NaN (); float3 n = make_float3 (qnan, qnan, qnan); float3 point = fetchPoint (idx); int3 g = getVoxel (point); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; t = point; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = point; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = point; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = point; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = point; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = point; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); } storeNormal (idx, n); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); /* //OLD CODE float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; if (point.x < vx) g.x--; if (point.y < vy) g.y--; if (point.z < vz) g.z--; //float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; //float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; //float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float a = point.x/ cell_size.x - (g.x + 0.5f); float b = point.y/ cell_size.y - (g.y + 0.5f); float c = point.z/ cell_size.z - (g.z + 0.5f); */ //NEW CODE float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; }; float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; }; float c = point.z/ cell_size.z - (g.z + 
0.5f); if (c<0) { g.z--; c+=1.0f; }; float res = (1 - a) * ( (1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * c ) ) + a * ( (1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * c ) ); return res; } }; template<typename NormalType> __global__ void extractNormalsKernel (const ExtractNormals<NormalType> en) { en (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename NormalType> void pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size, const PtrSz<PointType>& points, NormalType* output) { ExtractNormals<NormalType> en; en.volume = volume; en.cell_size.x = volume_size.x / VOLUME_X; en.cell_size.y = volume_size.y / VOLUME_Y; en.cell_size.z = volume_size.z / VOLUME_Z; en.points = points; en.output = output; dim3 block (256); dim3 grid (divUp (points.size, block.x)); hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } using namespace pcl::device; template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output); template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
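The .cu listing after this is the same PCL TSDF-extraction code before hipification; the pair differs mainly in the runtime prefix (cuda* vs hip*) and the kernel-launch spelling. A minimal, self-contained sketch of that launch mapping, using a hypothetical kernel `k` that is not part of either file:

#include <hip/hip_runtime.h>

__global__ void k(int *out) { out[threadIdx.x] = threadIdx.x; }

int main() {
  int *d = nullptr;
  hipMalloc((void **)&d, 32 * sizeof(int));
  dim3 grid(1), block(32);
  // CUDA spelling (as in the .cu file below):  k<<<grid, block>>>(d);
  // hipify's equivalent macro form; the two extra zeros are the dynamic
  // shared-memory size in bytes and the stream handle.
  hipLaunchKernelGGL(k, grid, block, 0, 0, d);
  hipDeviceSynchronize();
  hipFree(d);
  return 0;
}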
fe254e46dbde408950f47987b165da46b4642920.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" #include <iostream> namespace pcl { namespace device { //////////////////////////////////////////////////////////////////////////////////////// ///// Full Volume Scan6 enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 6, CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y, MAX_LOCAL_POINTS = 3 }; __device__ int global_count = 0; __device__ int output_xyz_count = 0; // ************************************************* __device__ unsigned int blocks_done = 0; __shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS]; __shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS]; __shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS]; __shared__ float storage_I[CTA_SIZE * MAX_LOCAL_POINTS]; struct FullScan6 { PtrStep<short2> volume; float3 cell_size; mutable PtrSz<PointType> output; mutable PtrSz<PointType> output_xyz; mutable PtrSz<float> output_intensity; __device__ __forceinline__ float fetch (pcl::gpu::tsdf_buffer buffer, int x, int y, int z, int& weight) const { float tsdf; const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]); short2* pos = const_cast<short2*> (tmp_pos); shift_tsdf_pointer (&pos, buffer); unpack_tsdf (*pos, tsdf, weight); return tsdf; } __device__ __forceinline__ float fetch (int x, int y, int z, int& weight) const { float tsdf; unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight); return tsdf; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; #if __CUDA_ARCH__ < 200 __shared__ int cta_buffer[CTA_SIZE]; #endif #if __CUDA_ARCH__ >= 120 if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y)) return; #else if (Emulation::All(x >= VOLUME_X, cta_buffer) || Emulation::All(y >= VOLUME_Y, cta_buffer)) return; #endif float3 V; V.x = (x + 0.5f) * cell_size.x; V.y = (y + 0.5f) * cell_size.y; int ftid = Block::flattenedThreadId (); for (int z = 0; z < VOLUME_Z - 1; 
++z) { float3 points[MAX_LOCAL_POINTS]; int local_count = 0; if (x < VOLUME_X && y < VOLUME_Y) { int W; float F = fetch (x, y, z, W); if (W != 0 && F != 1.f) { V.z = (z + 0.5f) * cell_size.z; // process dx if (x + 1 < VOLUME_X) { int Wn; float Fn = fetch (x + 1, y, z, Wn); if (Wn != 0 && Fn != 1.f) if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) ) { float3 p; p.y = V.y; p.z = V.z; float Vnx = V.x + cell_size.x; float d_inv = 1.f / (fabs (F) + fabs (Fn)); p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv; points[local_count++] = p; } } /* if (x + 1 < VOLUME_X) */ // process dy if (y + 1 < VOLUME_Y) { int Wn; float Fn = fetch (x, y + 1, z, Wn); if (Wn != 0 && Fn != 1.f) if ( (F > 0 && Fn < 0) || (F < 0 && Fn > 0) ) { float3 p; p.x = V.x; p.z = V.z; float Vny = V.y + cell_size.y; float d_inv = 1.f / (fabs (F) + fabs (Fn)); p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv; points[local_count++] = p; } } /* if (y + 1 < VOLUME_Y) */ // process dz // if (z + 1 < VOLUME_Z) // guaranteed by loop { int Wn; float Fn = fetch (x, y, z + 1, Wn); if (Wn != 0 && Fn != 1.f) if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0)) { float3 p; p.x = V.x; p.y = V.y; float Vnz = V.z + cell_size.z; float d_inv = 1.f / (fabs (F) + fabs (Fn)); p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv; points[local_count++] = p; } }/* if (z + 1 < VOLUME_Z) */ }/* if (W != 0 && F != 1.f) */ }/* if (x < VOLUME_X && y < VOLUME_Y) */ #if __CUDA_ARCH__ >= 200 //not we fulfilled points array at current iteration int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2)); #else int tid = Block::flattenedThreadId (); cta_buffer[tid] = local_count; int total_warp = Emulation::warp_reduce (cta_buffer, tid); #endif if (total_warp > 0) { int lane = Warp::laneId (); int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; volatile int* cta_buffer = (int*)(storage_X + storage_index); cta_buffer[lane] = local_count; int offset = scan_warp<exclusive>(cta_buffer, lane); if (lane == 0) { int old_global_count = atomicAdd (&global_count, total_warp); cta_buffer[0] = old_global_count; } int old_global_count = cta_buffer[0]; for (int l = 0; l < local_count; ++l) { storage_X[storage_index + offset + l] = points[l].x; storage_Y[storage_index + offset + l] = points[l].y; storage_Z[storage_index + offset + l] = points[l].z; } PointType *pos = output_xyz.data + old_global_count + lane; for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE) { float x = storage_X[storage_index + idx]; float y = storage_Y[storage_index + idx]; float z = storage_Z[storage_index + idx]; store_point_type (x, y, z, pos); } bool full = (old_global_count + total_warp) >= output_xyz.size; if (full) break; } }/* for(int z = 0; z < VOLUME_Z - 1; ++z) */ /////////////////////////// // Prepare for future scans if (ftid == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc (&blocks_done, total_blocks); // Last block if (value == total_blocks - 1) { output_xyz_count = min ((int)output_xyz.size, global_count); blocks_done = 0; global_count = 0; } } } /* operator() */ // OPERATOR USED BY EXTRACT_SLICE_AS_CLOUD. // This operator extracts the cloud as TSDF values and X,Y,Z indices. // The previous operator generates a regular point cloud in meters. // This one generates a TSDF Point Cloud in grid indices. 
__device__ __forceinline__ void operator () (pcl::gpu::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; int ftid = Block::flattenedThreadId (); int minimum_Z = 0; int maximum_Z = VOLUME_Z; for (int z = minimum_Z; z < maximum_Z; ++z) { // The black zone is the name given to the subvolume within the TSDF Volume grid that is shifted out. // In other words, the set of points in the TSDF grid that we want to extract in order to add it to the world model being built in CPU. bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ; float4 points[MAX_LOCAL_POINTS]; int local_count = 0; if (x < buffer.voxels_size.x && y < buffer.voxels_size.y && in_black_zone) { int W; float F = fetch (buffer, x, y, z, W); if (W != 0.0f && F != 1.f && F < 0.98 && F != 0.0f && F > -1.0f) { float4 p; p.x = x; p.y = y; p.z = z; p.w = F; points[local_count++] = p; } }/* if (x < VOLUME_X && y < VOLUME_Y) */ // local_count counts the number of zero crossing for the current thread. Now we need to merge this knowledge with the other threads // not we fulfilled points array at current iteration int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2)); if (total_warp > 0) ///more than 0 zero-crossings { int lane = Warp::laneId (); ///index of thread within warp [0-31] int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; // Pointer to the beginning of the current warp buffer volatile int* cta_buffer = (int*)(storage_X + storage_index); // Compute offset of current warp // Call in place scanning (see http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html) cta_buffer[lane] = local_count; int offset = scan_warp<exclusive>(cta_buffer, lane); //How many crossings did we have before index "lane" ? 
// We want to do only 1 operation per warp (not thread) -> because it is faster if (lane == 0) { int old_global_count = atomicAdd (&global_count, total_warp); ///We use atomicAdd, so that threads do not collide cta_buffer[0] = old_global_count; } int old_global_count = cta_buffer[0]; // Perform compaction (dump all current crossings) for (int l = 0; l < local_count; ++l) { storage_X[storage_index + offset + l] = points[l].x;// x coordinates of the points we found in STORAGE_X storage_Y[storage_index + offset + l] = points[l].y;// y coordinates of the points we found in STORAGE_Y storage_Z[storage_index + offset + l] = points[l].z;// z coordinates of the points we found in STORAGE_Z storage_I[storage_index + offset + l] = points[l].w;// Intensity values of the points we found in STORAGE_I } // Retrieve Zero-crossings as 3D points int offset_storage = old_global_count + lane; for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, offset_storage += Warp::STRIDE) { float x = storage_X[storage_index + idx]; float y = storage_Y[storage_index + idx]; float z = storage_Z[storage_index + idx]; float i = storage_I[storage_index + idx]; store_point_intensity (x, y, z, i, output_xyz.data, output_intensity.data, offset_storage); } // Sanity check to make sure our output_xyz buffer is not full already bool full = (old_global_count + total_warp) >= output_xyz.size; if (full) break; } } /* for(int z = 0; z < VOLUME_Z - 1; ++z) */ /////////////////////////// // Prepare for future scans if (ftid == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc (&blocks_done, total_blocks); // Last block if (value == total_blocks - 1) { output_xyz_count = min ((int)output_xyz.size, global_count); blocks_done = 0; global_count = 0; } } } /* operator() */ __device__ __forceinline__ void store_point_type (float x, float y, float z, float4* ptr) const { *ptr = make_float4 (x, y, z, 0); } //INLINE FUNCTION THAT STORES XYZ AND INTENSITY VALUES IN 2 SEPARATE DeviceArrays. 
// ptr_xyz: pointer to the BEGINNING of the XYZ deviceArray // ptr_instensity: pointer to the BEGINNING of the Intensity deviceArray // offset: offset to apply to both XYZ and Intensity __device__ __forceinline__ void store_point_intensity (float x, float y, float z, float i, float4* ptr_xyz, float* ptr_intensity, int offset) const { *(ptr_xyz + offset) = make_float4 (x, y, z, 0); *(ptr_intensity + offset) = i; } __device__ __forceinline__ void store_point_type (float x, float y, float z, float3* ptr) const { *ptr = make_float3 (x, y, z); } }; __global__ void extractKernel (const FullScan6 fs) { fs (); } __global__ void extractSliceKernel (const FullScan6 fs, pcl::gpu::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) { fs (buffer, minBounds, maxBounds); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size, PtrSz<PointType> output_xyz) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / VOLUME_X; fs.cell_size.y = volume_size.y / VOLUME_Y; fs.cell_size.z = volume_size.z / VOLUME_Z; fs.output_xyz = output_xyz; dim3 block (CTA_SIZE_X, CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); extractKernel<<<grid, block>>>(fs); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall ( cudaDeviceSynchronize () ); int size; cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_xyz_count, sizeof (size)) ); // cudaSafeCall ( cudaMemcpyFromSymbol (&size, "output_xyz_count", sizeof (size)) ); return ((size_t)size); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t pcl::device::extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::tsdf_buffer* buffer, const int shiftX, const int shiftY, const int shiftZ, PtrSz<PointType> output_xyz, PtrSz<float> output_intensities) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / buffer->voxels_size.x; fs.cell_size.y = volume_size.y / buffer->voxels_size.y; fs.cell_size.z = volume_size.z / buffer->voxels_size.z; fs.output_xyz = output_xyz; fs.output_intensity = output_intensities; dim3 block (CTA_SIZE_X, CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); //Compute slice bounds int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int newZ = buffer->origin_GRID.z + shiftZ; int3 minBounds, maxBounds; //X if (newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x - 1; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x - 1; } if (minBounds.x > maxBounds.x) std::swap (minBounds.x, maxBounds.x); //Y if (newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y - 1; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y - 1; } if(minBounds.y > maxBounds.y) std::swap (minBounds.y, maxBounds.y); //Z if (newZ >= 0) { minBounds.z = buffer->origin_GRID.z; maxBounds.z = newZ; } else { minBounds.z = newZ + buffer->voxels_size.z - 1; maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z - 1; } if (minBounds.z > maxBounds.z) std::swap(minBounds.z, maxBounds.z); minBounds.x -= buffer->origin_GRID.x; maxBounds.x -= buffer->origin_GRID.x; minBounds.y -= buffer->origin_GRID.y; maxBounds.y -= buffer->origin_GRID.y; minBounds.z -= buffer->origin_GRID.z; 
maxBounds.z -= buffer->origin_GRID.z; if (minBounds.x < 0) // We are shifting Left { minBounds.x += buffer->voxels_size.x; maxBounds.x += (buffer->voxels_size.x); } if (minBounds.y < 0) // We are shifting up { minBounds.y += buffer->voxels_size.y; maxBounds.y += (buffer->voxels_size.y); } if (minBounds.z < 0) // We are shifting back { minBounds.z += buffer->voxels_size.z; maxBounds.z += buffer->voxels_size.z; } // Extraction call extractSliceKernel<<<grid, block>>>(fs, *buffer, minBounds, maxBounds); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall ( cudaDeviceSynchronize () ); int size; cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_xyz_count, sizeof(size)) ); return (size_t)size; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { template<typename NormalType> struct ExtractNormals { float3 cell_size; PtrStep<short2> volume; PtrSz<PointType> points; mutable NormalType* output; __device__ __forceinline__ float readTsdf (int x, int y, int z) const { return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]); } __device__ __forceinline__ float3 fetchPoint (int idx) const { PointType p = points.data[idx]; return make_float3 (p.x, p.y, p.z); } __device__ __forceinline__ void storeNormal (int idx, float3 normal) const { NormalType n; n.x = normal.x; n.y = normal.y; n.z = normal.z; output[idx] = n; } __device__ __forceinline__ int3 getVoxel (const float3& point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ void operator () () const { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= points.size) return; const float qnan = numeric_limits<float>::quiet_NaN (); float3 n = make_float3 (qnan, qnan, qnan); float3 point = fetchPoint (idx); int3 g = getVoxel (point); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; t = point; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = point; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = point; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = point; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = point; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = point; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); } storeNormal (idx, n); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); /* //OLD CODE float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; if (point.x < vx) g.x--; if (point.y < vy) g.y--; if (point.z < vz) g.z--; //float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; //float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; //float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float a = point.x/ cell_size.x - (g.x + 0.5f); float b = point.y/ cell_size.y - (g.y + 0.5f); float c = point.z/ cell_size.z - (g.z + 0.5f); */ //NEW CODE float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; }; float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; }; float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; }; float res = (1 - a) * 
( (1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * c ) ) + a * ( (1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * c ) ); return res; } }; template<typename NormalType> __global__ void extractNormalsKernel (const ExtractNormals<NormalType> en) { en (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename NormalType> void pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size, const PtrSz<PointType>& points, NormalType* output) { ExtractNormals<NormalType> en; en.volume = volume; en.cell_size.x = volume_size.x / VOLUME_X; en.cell_size.y = volume_size.y / VOLUME_Y; en.cell_size.z = volume_size.z / VOLUME_Z; en.points = points; en.output = output; dim3 block (256); dim3 grid (divUp (points.size, block.x)); extractNormalsKernel<<<grid, block>>>(en); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } using namespace pcl::device; template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output); template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
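FullScan6 above compacts the per-thread zero-crossings by combining an intra-warp exclusive scan in shared memory with a single atomicAdd on global_count issued by lane 0 of each warp. A sketch of the same warp-aggregated compaction idea, written with the newer __ballot_sync/__shfl_sync intrinsics instead of the shared-memory scan used above (hypothetical kernel, illustrative only):

#include <cuda_runtime.h>

__device__ int g_count = 0;

// Each thread contributes at most one item; the warp computes per-lane output
// offsets from a ballot, and only lane 0 touches the global counter.
__global__ void compact(const int *in, int *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int has_item = (i < n && in[i] > 0) ? 1 : 0;

  unsigned mask = __ballot_sync(0xffffffffu, has_item);  // one bit per lane with an item
  int lane = threadIdx.x & 31;
  int offset = __popc(mask & ((1u << lane) - 1));        // items held by lower lanes
  int total = __popc(mask);                              // items held by the whole warp

  int base = 0;
  if (lane == 0 && total > 0) base = atomicAdd(&g_count, total);
  base = __shfl_sync(0xffffffffu, base, 0);              // broadcast the warp's base offset

  if (has_item) out[base + offset] = in[i];
}

int main() {
  const int n = 1024;
  int h_in[n], *d_in = nullptr, *d_out = nullptr;
  for (int i = 0; i < n; ++i) h_in[i] = (i % 3) - 1;     // mix of positive and non-positive values
  cudaMalloc(&d_in, n * sizeof(int));
  cudaMalloc(&d_out, n * sizeof(int));
  cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);
  compact<<<(n + 127) / 128, 128>>>(d_in, d_out, n);
  cudaDeviceSynchronize();
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}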
e1991bc39f63dbe86d4c03f00a3f234e8adda8bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/cuda_transform.h" #define LOOP_UNROLL // Bilinear Interpolation __device__ float bilinear(float q11, float q12, float q21, float q22, float scale) { return (1.0f - scale)*(1.0f - scale)*q11 + (1.0f - scale)*scale*q12 + scale*(1.0f - scale)*q21 + scale*scale*q22; } //__global__ void __launch_bounds__(MAX_BLOCK_SIZE, MIN_BLOCKS_PER_SM) resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) __global__ void resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (uint i = row; i < height; i += blockDim.y*gridDim.y) //#pragma unroll for (uint j = col; j < width; j += blockDim.x*gridDim.x) { #ifdef LOOP_UNROLL if (threadIdx.y + 1 < blockDim.y) { int r = i*scale, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; // Bilinear Interpolation float p = bilinear(*q11, *q12, *q21, *q22, scale); uchar *outputPixel = (uchar*)((char*)d_output + i*out_pitch) + j; *outputPixel = (uchar)p; r = r + 1; q11 = (uchar*)((char*)d_input + r*in_pitch) + c; q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; p = bilinear(*q11, *q12, *q21, *q22, scale); outputPixel = (uchar*)((char*)d_output + (i + 1)*out_pitch) + j; *outputPixel = (uchar)p; } #else #pragma unroll for (uint k = 0; k < 2; k++) { if (threadIdx.y + 1 < blockDim.y) { int r = i*scale + k, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; uchar *outputPixel = (uchar*)((char*)d_output + (i + k)*out_pitch) + j; float p = bilinear(*q11, *q12, *q21, *q22, scale); *outputPixel = (uchar)p; } } #endif // } } __global__ void tranpose(uchar *d_input, int height, int width, uchar *d_output) { int row = 32 * blockIdx.y + threadIdx.y; int col = 32 * blockIdx.x + threadIdx.x; __shared__ int smem[32][32 + 1]; if (row < height&&col < width) { //#pragma unroll for (size_t i = 0; i < 32; i += 8) { smem[threadIdx.y + i][threadIdx.x] = d_input[(row + i)*width + col]; } __syncthreads(); row = blockIdx.x*32 + threadIdx.y; col = blockIdx.y*32 + threadIdx.x; //#pragma unroll for (size_t i = 0; i < 32; i += 8) { d_output[(height - row - i)*width + width - col] = smem[threadIdx.x][threadIdx.y + i]; //d_output[(width - col - i)*width + height - row] = smem[threadIdx.x][threadIdx.y + i]; } } } void cudaResize(const cv::Mat & input, cv::Mat & output, float scale) { int newRow = int(input.rows * scale); int newCol = int(input.cols * scale); output = cv::Mat(cv::Size(newCol, newRow), CV_8U, cv::Scalar(0)); scale = 1.0f / scale; // define block size and thread size dim3 block_size(THREAD_MULTIPLE, 6); dim3 grid_size(output.cols / (4 * block_size.x), output.rows / (4 * block_size.y)); // I divide the image into 16 grid to increase ILP level. 
hipStream_t stream; hipStreamCreate(&stream); size_t in_pitch, out_pitch; uchar *d_input, *d_output; hipMallocPitch(&d_input, &in_pitch, sizeof(uchar)*input.cols, input.rows); hipMemcpy2DAsync(d_input, in_pitch, input.data, sizeof(uchar)*input.cols, sizeof(uchar)*input.cols, input.rows, hipMemcpyHostToDevice, stream); hipMallocPitch(&d_output, &out_pitch, sizeof(uchar)*output.cols, output.rows); hipLaunchKernelGGL(( resize) , dim3(grid_size), dim3(block_size), 0, stream , d_input, in_pitch, output.rows, output.cols, d_output, out_pitch, scale); hipDeviceSynchronize(); hipMemcpy2D(output.data, sizeof(uchar)*output.cols, d_output, out_pitch, sizeof(uchar)*output.cols, output.rows, hipMemcpyDeviceToHost); // resource releasing hipStreamDestroy(stream); hipFree(d_input); hipFree(d_output); }
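The bilinear() helper above blends the four neighbouring pixels with a single `scale` factor applied to both axes; the textbook form uses two independent fractional offsets. For reference, a standalone sketch of the general weighting (names are illustrative, not from the file):

// fx, fy in [0,1): fractional position of the sample inside the cell whose
// corner values are q11 (x0,y0), q21 (x1,y0), q12 (x0,y1), q22 (x1,y1).
float bilinear_general(float q11, float q21, float q12, float q22,
                       float fx, float fy) {
  float top    = q11 * (1.0f - fx) + q21 * fx;  // blend along x at y0
  float bottom = q12 * (1.0f - fx) + q22 * fx;  // blend along x at y1
  return top * (1.0f - fy) + bottom * fy;       // then blend along y
}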
e1991bc39f63dbe86d4c03f00a3f234e8adda8bd.cu
#include "../include/cuda_transform.h" #define LOOP_UNROLL // Bilinear Interpolation __device__ float bilinear(float q11, float q12, float q21, float q22, float scale) { return (1.0f - scale)*(1.0f - scale)*q11 + (1.0f - scale)*scale*q12 + scale*(1.0f - scale)*q21 + scale*scale*q22; } //__global__ void __launch_bounds__(MAX_BLOCK_SIZE, MIN_BLOCKS_PER_SM) resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) __global__ void resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (uint i = row; i < height; i += blockDim.y*gridDim.y) //#pragma unroll for (uint j = col; j < width; j += blockDim.x*gridDim.x) { #ifdef LOOP_UNROLL if (threadIdx.y + 1 < blockDim.y) { int r = i*scale, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; // Bilinear Interpolation float p = bilinear(*q11, *q12, *q21, *q22, scale); uchar *outputPixel = (uchar*)((char*)d_output + i*out_pitch) + j; *outputPixel = (uchar)p; r = r + 1; q11 = (uchar*)((char*)d_input + r*in_pitch) + c; q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; p = bilinear(*q11, *q12, *q21, *q22, scale); outputPixel = (uchar*)((char*)d_output + (i + 1)*out_pitch) + j; *outputPixel = (uchar)p; } #else #pragma unroll for (uint k = 0; k < 2; k++) { if (threadIdx.y + 1 < blockDim.y) { int r = i*scale + k, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; uchar *outputPixel = (uchar*)((char*)d_output + (i + k)*out_pitch) + j; float p = bilinear(*q11, *q12, *q21, *q22, scale); *outputPixel = (uchar)p; } } #endif // } } __global__ void tranpose(uchar *d_input, int height, int width, uchar *d_output) { int row = 32 * blockIdx.y + threadIdx.y; int col = 32 * blockIdx.x + threadIdx.x; __shared__ int smem[32][32 + 1]; if (row < height&&col < width) { //#pragma unroll for (size_t i = 0; i < 32; i += 8) { smem[threadIdx.y + i][threadIdx.x] = d_input[(row + i)*width + col]; } __syncthreads(); row = blockIdx.x*32 + threadIdx.y; col = blockIdx.y*32 + threadIdx.x; //#pragma unroll for (size_t i = 0; i < 32; i += 8) { d_output[(height - row - i)*width + width - col] = smem[threadIdx.x][threadIdx.y + i]; //d_output[(width - col - i)*width + height - row] = smem[threadIdx.x][threadIdx.y + i]; } } } void cudaResize(const cv::Mat & input, cv::Mat & output, float scale) { int newRow = int(input.rows * scale); int newCol = int(input.cols * scale); output = cv::Mat(cv::Size(newCol, newRow), CV_8U, cv::Scalar(0)); scale = 1.0f / scale; // define block size and thread size dim3 block_size(THREAD_MULTIPLE, 6); dim3 grid_size(output.cols / (4 * block_size.x), output.rows / (4 * block_size.y)); // I divide the image into 16 grid to increase ILP level. 
cudaStream_t stream; cudaStreamCreate(&stream); size_t in_pitch, out_pitch; uchar *d_input, *d_output; cudaMallocPitch(&d_input, &in_pitch, sizeof(uchar)*input.cols, input.rows); cudaMemcpy2DAsync(d_input, in_pitch, input.data, sizeof(uchar)*input.cols, sizeof(uchar)*input.cols, input.rows, cudaMemcpyHostToDevice, stream); cudaMallocPitch(&d_output, &out_pitch, sizeof(uchar)*output.cols, output.rows); resize <<<grid_size, block_size, 0, stream >>>(d_input, in_pitch, output.rows, output.cols, d_output, out_pitch, scale); cudaDeviceSynchronize(); cudaMemcpy2D(output.data, sizeof(uchar)*output.cols, d_output, out_pitch, sizeof(uchar)*output.cols, output.rows, cudaMemcpyDeviceToHost); // resource releasing cudaStreamDestroy(stream); cudaFree(d_input); cudaFree(d_output); }
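Both copies of cudaResize allocate with *MallocPitch and step through rows by the returned byte pitch, i.e. (char*)base + r*pitch rather than r*width, because each row may be padded for alignment. A minimal standalone sketch of that addressing pattern (illustrative kernel and sizes, not taken from the file):

#include <cuda_runtime.h>

// Rows of a pitched allocation are `pitch` bytes apart, so the row start is
// computed in bytes before indexing by column.
__global__ void fill(unsigned char *img, size_t pitch, int rows, int cols) {
  int r = blockIdx.y * blockDim.y + threadIdx.y;
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (r < rows && c < cols) {
    unsigned char *row = (unsigned char *)((char *)img + r * pitch);
    row[c] = (unsigned char)((r + c) & 0xff);
  }
}

int main() {
  unsigned char *d_img = nullptr;
  size_t pitch = 0;
  const int rows = 480, cols = 640;
  cudaMallocPitch((void **)&d_img, &pitch, cols * sizeof(unsigned char), rows);
  dim3 block(32, 8), grid((cols + 31) / 32, (rows + 7) / 8);
  fill<<<grid, block>>>(d_img, pitch, rows, cols);
  cudaDeviceSynchronize();
  cudaFree(d_img);
  return 0;
}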
fde7ad2f54e78c2cc4725cd73cb2668f5dd349b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "qmd.h" #include <stdio.h> #include <math.h> #include <stdlib.h> int DEBUG; int main(int argc, char* argv[]) { int step; MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &nProc); MPI_Comm_rank(MPI_COMM_WORLD, &myid); hipSetDevice(myid%2); usingCUDA = true; DEBUG = false; init_Parameters(); init_Variables(); init_Propogators(); init_WaveFunction(); if (usingCUDA) init_GPU(); for (step=1; step<=nStep; ++step) { single_Step(); if (step%energyInterval == 0) { calculate_Energy(); if (myid==0) printf("%le %le %le %le\n", deltaTime*step, expected[_T_], expected[_V_], expected[_E_]); //if (myid == 0) printf("%le, %le\n", deltaTime*step, expected[_V_]); } } cleanUpVariables(); if (usingCUDA) GPU_Finalize(); MPI_Finalize(); return 0; } /* Initialize GPU communication and variables **********************************************/ void init_GPU() { size_t size = 2*(MESH_SIZE)*sizeof(double); hipMalloc((double**) &dev_psi, size); hipMalloc((double**) &dev_workPsi, size); hipMalloc((double**) &dev_TDiag_half, 2 * 2 * sizeof(double)); hipMalloc((double**) &dev_TDiag_full, 2 * 2 * sizeof(double)); hipMalloc((double**) &dev_TUpper_half, size); hipMalloc((double**) &dev_TUpper_full, size); hipMalloc((double**) &dev_TLower_half, size); hipMalloc((double**) &dev_TLower_full, size); hipMalloc((double**) &dev_VPropogator, size); hostToDevice(&TDiag[FULL], dev_TDiag_full, 1); hostToDevice(TUpper[FULL], dev_TUpper_full, MESH_SIZE); hostToDevice(TLower[FULL], dev_TLower_full, MESH_SIZE); hostToDevice(&TDiag[HALF], dev_TDiag_half, 1); hostToDevice(TUpper[HALF], dev_TUpper_half, MESH_SIZE); hostToDevice(TLower[HALF], dev_TLower_half, MESH_SIZE); } /* Cleans up cudaMalloced memory */ void GPU_Finalize() { hipFree(&dev_psi); hipFree(&dev_workPsi); hipFree(&dev_TDiag_half); hipFree(&dev_TDiag_full); hipFree(&dev_TUpper_half); hipFree(&dev_TUpper_full); hipFree(&dev_TLower_half); hipFree(&dev_TLower_full); hipFree(&dev_VPropogator); } /* Initialize parameters for the simulation by reading in from a file **********************/ void init_Parameters() { scanf("%le", &Lx); scanf("%le", &deltaTime); scanf("%d", &nStep); scanf("%d", &energyInterval); scanf("%le %le %le", &x0, &s0, &e0); scanf("%le %le", &barrierHeight, &barrierWidth); scanf("%le", &edgePotential); if (DEBUG) { printf("Lx = %le\n", Lx); printf("deltaT = %le\n", deltaTime); printf("nStep = %d\n", nStep); printf("energyInterval = %d\n", energyInterval); printf("x0, s0, e0 = %le %le %le\n", x0, s0, e0); printf("bH, BW = %le %le\n", barrierHeight, barrierWidth); printf("edgePotential = %le\n", edgePotential); } MPI_Bcast(&Lx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&deltaTime, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&nStep, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&energyInterval, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&x0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&s0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&e0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&barrierWidth, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&barrierHeight, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&edgePotential, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Mesh Size dx = (double) Lx / NX; } void init_Variables() { int i,j; host_psi = (double**) malloc(MESH_SIZE * sizeof(double*)); for (i=0; i<MESH_SIZE; ++i) { host_psi[i] = (double *) malloc(sizeof(double)*2); } workPsi = (double**) malloc(MESH_SIZE * sizeof(double*)); for (i=0; i<MESH_SIZE; ++i) { workPsi[i] = (double 
*) malloc (2 * sizeof(double)); } TDiag = (double**) malloc(sizeof(double*) * 2); for (i=0; i<2; ++i) { TDiag[i] = (double*) malloc(sizeof(double) * 2); } TUpper = (double***) malloc(sizeof(double**) * 2); for (i=0; i<2; ++i) { TUpper[i] = (double**) malloc(sizeof(double*) * MESH_SIZE); for (j=0; j<MESH_SIZE; ++j) { TUpper[i][j] = (double*) malloc(sizeof(double) * COMPLEX); } } TLower = (double***) malloc(sizeof(double**) * 2); for (i=0; i<2; ++i) { TLower[i] = (double**) malloc(sizeof(double*) * MESH_SIZE); for (j=0; j<MESH_SIZE; ++j) { TLower[i][j] = (double*) malloc(sizeof(double) * COMPLEX); } } VPropogator = (double**) malloc(sizeof(double*) * MESH_SIZE); for (i=0; i<MESH_SIZE; ++i) { VPropogator[i] = (double *) malloc(sizeof(double)*2); } potential = (double*) malloc(sizeof(double) * MESH_SIZE); } void init_Propogators() { double a, exp_p[2], ePlus[2], eMinus[2]; int i,c,upper,lower,step; //for iterating double x; a = 0.5 / (dx * dx); //diagonal element for kinetic propogator. // Construct the Kinetic Propogators for (step=0; step<2; ++step) { // Get espilon(1|2)(+|-) values exp_p[0] = cos(-(step+1) * deltaTime * a); exp_p[1] = sin(-(step+1) * deltaTime * a); ePlus[0] = 0.5 * (1.0 + exp_p[0]); ePlus[1] = 0.5 * exp_p[1]; eMinus[0] = 0.5 * (1.0 - exp_p[0]); eMinus[1] = -0.5 * exp_p[1]; for (c=0; c<2; ++c) { TDiag[step][c] = ePlus[c]; } for (i=1; i<=NX; ++i) { if (step == HALF) { upper = i%2; lower = (i+1)%2; } else { upper = (i+1)%2; lower = i%2; } for (c=0; c<2; ++c) { //if (myid==0) printf("Loop: step=%i i=%i, c=%i\n", step, i, c); TUpper[step][i][c] = upper * eMinus[c]; TLower[step][i][c] = lower * eMinus[c]; } } } // Construct Potential Propogators for (i=1; i<=NX; ++i) { x = (dx*i + Lx*myid); // Edge Potential if ((myid==0 && i==1) || (myid==nProc-1 && i==NX)) { potential[i] = edgePotential; } else if (0.5*(Lx*nProc-barrierWidth)<x && x<0.5*(Lx*nProc+barrierWidth)){ potential[i] = barrierHeight; } else potential[i] = 0; VPropogator[i][RE] = cos(-0.5 * deltaTime * potential[i]); VPropogator[i][IM] = sin(-0.5 * deltaTime * potential[i]); } } void init_WaveFunction() { int sx, c; double x, gaussian, normalize; double psiSquared, temp; double DXSX, LXMYID; //for debugging // Calculuate Psi point-by-point for (sx=1; sx<=NX; ++sx) { DXSX = (double) dx * sx; LXMYID = (double) Lx * myid; temp = DXSX + LXMYID; x = temp - x0; gaussian = exp((-0.25 * x * x) / (s0 * s0)); host_psi[sx][RE] = gaussian * cos(sqrt(2.0 * e0) * x); host_psi[sx][IM] = gaussian * sin(sqrt(2.0 * e0) * x); } // Normalize temp = 0.0; for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { temp += (host_psi[sx][c]*host_psi[sx][c]); } } MPI_Allreduce(&temp, &psiSquared, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); psiSquared *= dx; //printf("<Proc: %i> PSI SQUARED: %le\n", myid, psiSquared); normalize = 1.0 / sqrt(psiSquared); for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { host_psi[sx][c] *= normalize; } } } void cleanUpVariables() { int i,j; for (i=0; i<MESH_SIZE; ++i) { free(host_psi[i]); } free(host_psi); for (i=0; i<MESH_SIZE; ++i) { free(workPsi[i]); } free(workPsi); for (i=0; i<2; ++i) { free(TDiag[i]); } free(TDiag); for (i=0; i<2; ++i) { for (j=0; j<MESH_SIZE; ++j) { free(TUpper[i][j]); } free(TUpper[i]); } free(TUpper); for (i=0; i<2; ++i) { for (j=0; j<MESH_SIZE; ++j) { free(TLower[i][j]); } free(TLower[i]); } free(TLower); for (i=0; i<MESH_SIZE; ++i) { free(VPropogator[i]); } free(VPropogator); free(potential); } 
/************************************************************************************** * Time Stepping/Propogation Functions **************************************************************************************/ /* SIngle time step in Quantum Dynamics simulation */ void single_Step() { potential_Propogation(); //Half Potential Propogator kinetic_Propogation(HALF); //Half Kinetic Propogator kinetic_Propogation(FULL); //Full Kinetic Propogator kinetic_Propogation(HALF); //Half Kinetic Propogator potential_Propogation(); //getPsiSquared(); } void getPsiSquared() { double temp, psiSquared; int sx, c; temp = 0.0; psiSquared = 0.0; for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { temp += (host_psi[sx][c]*host_psi[sx][c]); } } MPI_Allreduce(&temp, &psiSquared, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); psiSquared *= dx; if (myid==0) printf("Psi Squared: %le\n", psiSquared); } /* Half Potential Energy propogator Function */ void potential_Propogation() { if (usingCUDA) { hostToDevice(host_psi, dev_psi, MESH_SIZE); hipLaunchKernelGGL(( gpu_Potential_Prop) , dim3(gridDim), dim3(blockDim), 0, 0, dev_psi, dev_VPropogator); deviceToHost(dev_psi, host_psi, MESH_SIZE); } else { regular_Potential_Prop(); } } __global__ void gpu_Potential_Prop(double *psi, double *vProp) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sx, s_Re, s_Im; double workPsi_Re, workPsi_Im; sx= tid+1; s_Re = 2*sx; s_Im = 2*sx+1; workPsi_Re = vProp[s_Re]*psi[s_Re] - vProp[s_Im]*psi[s_Im]; workPsi_Im = vProp[s_Re]*psi[s_Im] + vProp[s_Im]*psi[s_Re]; psi[s_Re] = workPsi_Re; psi[s_Im] = workPsi_Im; } void regular_Potential_Prop() { int sx; double workPsi_Re, workPsi_Im; for (sx=1; sx<=NX; ++sx) { workPsi_Re = VPropogator[sx][RE]*host_psi[sx][RE] - VPropogator[sx][IM]*host_psi[sx][IM]; workPsi_Im = VPropogator[sx][RE]*host_psi[sx][IM] + VPropogator[sx][IM]*host_psi[sx][RE]; host_psi[sx][RE]= workPsi_Re; host_psi[sx][IM]= workPsi_Im; } } /* Half|Full Kinetic Energy Propogator Function */ void kinetic_Propogation(int stepSize) { //First apply periodic bounds periodic_Bounds(); if (usingCUDA) { hostToDevice(host_psi, dev_psi, MESH_SIZE); if (stepSize == FULL) { hipLaunchKernelGGL(( gpu_Kinetic_Prop), dim3(gridDim), dim3(blockDim), 0, 0, dev_psi, dev_workPsi, dev_TDiag_full, dev_TLower_full, dev_TUpper_full); } else { hipLaunchKernelGGL(( gpu_Kinetic_Prop), dim3(gridDim), dim3(blockDim), 0, 0, dev_psi, dev_workPsi, dev_TDiag_half, dev_TLower_half, dev_TUpper_half); } hipLaunchKernelGGL(( gpu_WorkToPsi), dim3(gridDim), dim3(blockDim), 0, 0, dev_workPsi, dev_psi); deviceToHost(dev_psi, host_psi, MESH_SIZE); } else { regular_Kinetic_Prop(stepSize); } } __global__ void gpu_Kinetic_Prop(double *psi, double *work, double *al, double *blx, double *bux) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sx, s_Re, s_Im, l_Re, l_Im, u_Re, u_Im; double workPsi_Re, workPsi_Im; sx= tid+1; s_Re = 2*sx; s_Im = 2*sx+1; l_Re = 2*(sx-1); l_Im = 2*(sx-1)+1; u_Re = 2*(sx+1); u_Im = 2*(sx+1)+1; workPsi_Re = al[0]*psi[s_Re] - al[1]*psi[s_Im]; workPsi_Im = al[0]*psi[s_Im] + al[1]*psi[s_Re]; workPsi_Re += blx[s_Re]*psi[l_Re] - blx[s_Im]*psi[l_Im]; workPsi_Im += blx[s_Re]*psi[l_Im] + blx[s_Im]*psi[l_Re]; workPsi_Re += bux[s_Re]*psi[u_Re] - bux[s_Im]*psi[u_Im]; workPsi_Im += bux[s_Re]*psi[u_Im] + bux[s_Im]*psi[u_Re]; work[s_Re] = workPsi_Re; work[s_Im] = workPsi_Im; } void regular_Kinetic_Prop(int t) { int sx, c; double workPsi_Re, workPsi_Im; for (sx=1; sx<=NX; ++sx) { workPsi_Re = TDiag[t][RE]*host_psi[sx][RE] - 
TDiag[t][IM]*host_psi[sx][IM]; workPsi_Im = TDiag[t][RE]*host_psi[sx][IM] + TDiag[t][IM]*host_psi[sx][RE]; workPsi_Re += (TLower[t][sx][RE]*host_psi[sx-1][RE]) - (TLower[t][sx][IM]*host_psi[sx-1][IM]); workPsi_Im += (TLower[t][sx][RE]*host_psi[sx-1][IM]) + (TLower[t][sx][IM]*host_psi[sx-1][RE]); workPsi_Re += (TUpper[t][sx][RE]*host_psi[sx+1][RE]) - (TUpper[t][sx][IM]*host_psi[sx+1][IM]); workPsi_Im += (TUpper[t][sx][RE]*host_psi[sx+1][IM]) + (TUpper[t][sx][IM]*host_psi[sx+1][RE]); workPsi[sx][RE] = workPsi_Re; workPsi[sx][IM] = workPsi_Im; } for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { host_psi[sx][c] = workPsi[sx][c]; } } } /************************************************************************************* * HOST-DEVICE CONVERSION Functions ************************************************************************************/ __global__ void gpu_WorkToPsi(double *work, double *psi) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sx, s_Re, s_Im; sx= tid+1; s_Re = 2*sx; s_Im = 2*sx+1; psi[s_Re] = work[s_Re]; psi[s_Im] = work[s_Im]; } void hostToDevice(double** host, double* device, int size) { int i,j; double *hostBuf; hostBuf = (double*) malloc(sizeof(double) * size * 2); for (i=0; i<size; ++i) { for (j=0; j<2; ++j) { hostBuf[2*i + j] = host[i][j]; } } hipMemcpy((void*) device, hostBuf, 2*size*sizeof(double), hipMemcpyHostToDevice); free(hostBuf); } void deviceToHost(double* device, double** host, int size) { int i,j; double *devBuf; devBuf = (double*) malloc(sizeof(double) * size * 2); hipMemcpy((void*) devBuf, device, 2*size*sizeof(double), hipMemcpyDeviceToHost); for (i=0; i<size; ++i) { for (j=0; j<2; ++j) { host[i][j] = devBuf[2*i + j]; } } free(devBuf); } /************************************************************************************** * MPI Related Functions *************************************************************************************/ /* Applies Periodic boundary conditions */ void periodic_Bounds() { int neighbor[2]; double sendBuf[2], recvBuf[2]; neighbor[right] = (myid + 1) % nProc; neighbor[left] = (myid - 1 + nProc) % nProc; // Send right, receive left sendBuf[0] = host_psi[NX][RE]; sendBuf[1] = host_psi[NX][IM]; MPI_Irecv(recvBuf, 2, MPI_DOUBLE, neighbor[left], DAEMON, MPI_COMM_WORLD, &request); MPI_Send(sendBuf, 2, MPI_DOUBLE, neighbor[right], DAEMON, MPI_COMM_WORLD); MPI_Wait(&request, &status); host_psi[0][RE] = recvBuf[RE]; host_psi[0][IM] = recvBuf[IM]; // Send left, receive right sendBuf[0] = host_psi[1][RE]; sendBuf[1] = host_psi[1][IM]; MPI_Irecv(recvBuf, 2, MPI_DOUBLE, neighbor[right], DAEMON+1, MPI_COMM_WORLD, &request); MPI_Send(sendBuf, 2, MPI_DOUBLE, neighbor[left], DAEMON+1, MPI_COMM_WORLD); MPI_Wait(&request, &status); host_psi[NX+1][RE] = recvBuf[RE]; host_psi[NX+1][IM] = recvBuf[IM]; } /* Total Energy Calculation at the current time step */ void calculate_Energy() { int sx, c; double a, b; // Apply periodic boundary conditions periodic_Bounds(); // Tridiagonal KE operators a = 1.0 / (dx * dx); b = -0.5 / (dx * dx); // | work > = (-1/2) Laplacian | Psi > for (sx=1; sx<=NX; ++sx) for (c=0; c<2; ++c) workPsi[sx][c] = a*host_psi[sx][c] + b*(host_psi[sx-1][c] + host_psi[sx+1][c]); // Expected Value for Kinetic Energy expected[_T_] = 0.0; for (sx=1; sx<=NX; ++sx) expected[_T_] += (host_psi[sx][RE]*workPsi[sx][RE] + host_psi[sx][IM]*workPsi[sx][IM]); expected[_T_] *= dx; MPI_Allreduce(&expected[_T_], &expected[_T_], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); // Expected Value for Potential Energy expected[_V_] = 
0.0;
  for (sx=1; sx<=NX; ++sx)
    expected[_V_] += potential[sx]*(host_psi[sx][RE]*host_psi[sx][RE] + host_psi[sx][IM]*host_psi[sx][IM]);
  expected[_V_] *= dx;
  MPI_Allreduce(&expected[_V_], &expected[_V_], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Barrier(MPI_COMM_WORLD);
  // Expected total Energy
  expected[_E_] = expected[_T_] + expected[_V_];
}
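single_Step() above is the usual split-operator (Trotter) update: a half kick from the potential propagator, the kinetic propagator applied as half/full/half banded factors, and a closing half potential kick. Schematically, with H = T + V and time step \Delta t (the exact banded factors are the HALF/FULL tables in TDiag/TUpper/TLower):

e^{-iH\,\Delta t} \;\approx\; e^{-iV\,\Delta t/2}\; U_T(\Delta t)\; e^{-iV\,\Delta t/2},
\qquad
U_T(\Delta t) \;\approx\; U_T^{\mathrm{half}}\, U_T^{\mathrm{full}}\, U_T^{\mathrm{half}}.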
fde7ad2f54e78c2cc4725cd73cb2668f5dd349b0.cu
#include "qmd.h" #include <stdio.h> #include <math.h> #include <stdlib.h> int DEBUG; int main(int argc, char* argv[]) { int step; MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &nProc); MPI_Comm_rank(MPI_COMM_WORLD, &myid); cudaSetDevice(myid%2); usingCUDA = true; DEBUG = false; init_Parameters(); init_Variables(); init_Propogators(); init_WaveFunction(); if (usingCUDA) init_GPU(); for (step=1; step<=nStep; ++step) { single_Step(); if (step%energyInterval == 0) { calculate_Energy(); if (myid==0) printf("%le %le %le %le\n", deltaTime*step, expected[_T_], expected[_V_], expected[_E_]); //if (myid == 0) printf("%le, %le\n", deltaTime*step, expected[_V_]); } } cleanUpVariables(); if (usingCUDA) GPU_Finalize(); MPI_Finalize(); return 0; } /* Initialize GPU communication and variables **********************************************/ void init_GPU() { size_t size = 2*(MESH_SIZE)*sizeof(double); cudaMalloc((double**) &dev_psi, size); cudaMalloc((double**) &dev_workPsi, size); cudaMalloc((double**) &dev_TDiag_half, 2 * 2 * sizeof(double)); cudaMalloc((double**) &dev_TDiag_full, 2 * 2 * sizeof(double)); cudaMalloc((double**) &dev_TUpper_half, size); cudaMalloc((double**) &dev_TUpper_full, size); cudaMalloc((double**) &dev_TLower_half, size); cudaMalloc((double**) &dev_TLower_full, size); cudaMalloc((double**) &dev_VPropogator, size); hostToDevice(&TDiag[FULL], dev_TDiag_full, 1); hostToDevice(TUpper[FULL], dev_TUpper_full, MESH_SIZE); hostToDevice(TLower[FULL], dev_TLower_full, MESH_SIZE); hostToDevice(&TDiag[HALF], dev_TDiag_half, 1); hostToDevice(TUpper[HALF], dev_TUpper_half, MESH_SIZE); hostToDevice(TLower[HALF], dev_TLower_half, MESH_SIZE); } /* Cleans up cudaMalloced memory */ void GPU_Finalize() { cudaFree(&dev_psi); cudaFree(&dev_workPsi); cudaFree(&dev_TDiag_half); cudaFree(&dev_TDiag_full); cudaFree(&dev_TUpper_half); cudaFree(&dev_TUpper_full); cudaFree(&dev_TLower_half); cudaFree(&dev_TLower_full); cudaFree(&dev_VPropogator); } /* Initialize parameters for the simulation by reading in from a file **********************/ void init_Parameters() { scanf("%le", &Lx); scanf("%le", &deltaTime); scanf("%d", &nStep); scanf("%d", &energyInterval); scanf("%le %le %le", &x0, &s0, &e0); scanf("%le %le", &barrierHeight, &barrierWidth); scanf("%le", &edgePotential); if (DEBUG) { printf("Lx = %le\n", Lx); printf("deltaT = %le\n", deltaTime); printf("nStep = %d\n", nStep); printf("energyInterval = %d\n", energyInterval); printf("x0, s0, e0 = %le %le %le\n", x0, s0, e0); printf("bH, BW = %le %le\n", barrierHeight, barrierWidth); printf("edgePotential = %le\n", edgePotential); } MPI_Bcast(&Lx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&deltaTime, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&nStep, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&energyInterval, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&x0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&s0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&e0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&barrierWidth, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&barrierHeight, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&edgePotential, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Mesh Size dx = (double) Lx / NX; } void init_Variables() { int i,j; host_psi = (double**) malloc(MESH_SIZE * sizeof(double*)); for (i=0; i<MESH_SIZE; ++i) { host_psi[i] = (double *) malloc(sizeof(double)*2); } workPsi = (double**) malloc(MESH_SIZE * sizeof(double*)); for (i=0; i<MESH_SIZE; ++i) { workPsi[i] = (double *) malloc (2 * sizeof(double)); } TDiag = (double**) 
malloc(sizeof(double*) * 2); for (i=0; i<2; ++i) { TDiag[i] = (double*) malloc(sizeof(double) * 2); } TUpper = (double***) malloc(sizeof(double**) * 2); for (i=0; i<2; ++i) { TUpper[i] = (double**) malloc(sizeof(double*) * MESH_SIZE); for (j=0; j<MESH_SIZE; ++j) { TUpper[i][j] = (double*) malloc(sizeof(double) * COMPLEX); } } TLower = (double***) malloc(sizeof(double**) * 2); for (i=0; i<2; ++i) { TLower[i] = (double**) malloc(sizeof(double*) * MESH_SIZE); for (j=0; j<MESH_SIZE; ++j) { TLower[i][j] = (double*) malloc(sizeof(double) * COMPLEX); } } VPropogator = (double**) malloc(sizeof(double*) * MESH_SIZE); for (i=0; i<MESH_SIZE; ++i) { VPropogator[i] = (double *) malloc(sizeof(double)*2); } potential = (double*) malloc(sizeof(double) * MESH_SIZE); } void init_Propogators() { double a, exp_p[2], ePlus[2], eMinus[2]; int i,c,upper,lower,step; //for iterating double x; a = 0.5 / (dx * dx); //diagonal element for kinetic propogator. // Construct the Kinetic Propogators for (step=0; step<2; ++step) { // Get espilon(1|2)(+|-) values exp_p[0] = cos(-(step+1) * deltaTime * a); exp_p[1] = sin(-(step+1) * deltaTime * a); ePlus[0] = 0.5 * (1.0 + exp_p[0]); ePlus[1] = 0.5 * exp_p[1]; eMinus[0] = 0.5 * (1.0 - exp_p[0]); eMinus[1] = -0.5 * exp_p[1]; for (c=0; c<2; ++c) { TDiag[step][c] = ePlus[c]; } for (i=1; i<=NX; ++i) { if (step == HALF) { upper = i%2; lower = (i+1)%2; } else { upper = (i+1)%2; lower = i%2; } for (c=0; c<2; ++c) { //if (myid==0) printf("Loop: step=%i i=%i, c=%i\n", step, i, c); TUpper[step][i][c] = upper * eMinus[c]; TLower[step][i][c] = lower * eMinus[c]; } } } // Construct Potential Propogators for (i=1; i<=NX; ++i) { x = (dx*i + Lx*myid); // Edge Potential if ((myid==0 && i==1) || (myid==nProc-1 && i==NX)) { potential[i] = edgePotential; } else if (0.5*(Lx*nProc-barrierWidth)<x && x<0.5*(Lx*nProc+barrierWidth)){ potential[i] = barrierHeight; } else potential[i] = 0; VPropogator[i][RE] = cos(-0.5 * deltaTime * potential[i]); VPropogator[i][IM] = sin(-0.5 * deltaTime * potential[i]); } } void init_WaveFunction() { int sx, c; double x, gaussian, normalize; double psiSquared, temp; double DXSX, LXMYID; //for debugging // Calculuate Psi point-by-point for (sx=1; sx<=NX; ++sx) { DXSX = (double) dx * sx; LXMYID = (double) Lx * myid; temp = DXSX + LXMYID; x = temp - x0; gaussian = exp((-0.25 * x * x) / (s0 * s0)); host_psi[sx][RE] = gaussian * cos(sqrt(2.0 * e0) * x); host_psi[sx][IM] = gaussian * sin(sqrt(2.0 * e0) * x); } // Normalize temp = 0.0; for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { temp += (host_psi[sx][c]*host_psi[sx][c]); } } MPI_Allreduce(&temp, &psiSquared, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); psiSquared *= dx; //printf("<Proc: %i> PSI SQUARED: %le\n", myid, psiSquared); normalize = 1.0 / sqrt(psiSquared); for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { host_psi[sx][c] *= normalize; } } } void cleanUpVariables() { int i,j; for (i=0; i<MESH_SIZE; ++i) { free(host_psi[i]); } free(host_psi); for (i=0; i<MESH_SIZE; ++i) { free(workPsi[i]); } free(workPsi); for (i=0; i<2; ++i) { free(TDiag[i]); } free(TDiag); for (i=0; i<2; ++i) { for (j=0; j<MESH_SIZE; ++j) { free(TUpper[i][j]); } free(TUpper[i]); } free(TUpper); for (i=0; i<2; ++i) { for (j=0; j<MESH_SIZE; ++j) { free(TLower[i][j]); } free(TLower[i]); } free(TLower); for (i=0; i<MESH_SIZE; ++i) { free(VPropogator[i]); } free(VPropogator); free(potential); } /************************************************************************************** * Time Stepping/Propogation Functions 
**************************************************************************************/ /* SIngle time step in Quantum Dynamics simulation */ void single_Step() { potential_Propogation(); //Half Potential Propogator kinetic_Propogation(HALF); //Half Kinetic Propogator kinetic_Propogation(FULL); //Full Kinetic Propogator kinetic_Propogation(HALF); //Half Kinetic Propogator potential_Propogation(); //getPsiSquared(); } void getPsiSquared() { double temp, psiSquared; int sx, c; temp = 0.0; psiSquared = 0.0; for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { temp += (host_psi[sx][c]*host_psi[sx][c]); } } MPI_Allreduce(&temp, &psiSquared, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); psiSquared *= dx; if (myid==0) printf("Psi Squared: %le\n", psiSquared); } /* Half Potential Energy propogator Function */ void potential_Propogation() { if (usingCUDA) { hostToDevice(host_psi, dev_psi, MESH_SIZE); gpu_Potential_Prop <<<gridDim, blockDim>>> (dev_psi, dev_VPropogator); deviceToHost(dev_psi, host_psi, MESH_SIZE); } else { regular_Potential_Prop(); } } __global__ void gpu_Potential_Prop(double *psi, double *vProp) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sx, s_Re, s_Im; double workPsi_Re, workPsi_Im; sx= tid+1; s_Re = 2*sx; s_Im = 2*sx+1; workPsi_Re = vProp[s_Re]*psi[s_Re] - vProp[s_Im]*psi[s_Im]; workPsi_Im = vProp[s_Re]*psi[s_Im] + vProp[s_Im]*psi[s_Re]; psi[s_Re] = workPsi_Re; psi[s_Im] = workPsi_Im; } void regular_Potential_Prop() { int sx; double workPsi_Re, workPsi_Im; for (sx=1; sx<=NX; ++sx) { workPsi_Re = VPropogator[sx][RE]*host_psi[sx][RE] - VPropogator[sx][IM]*host_psi[sx][IM]; workPsi_Im = VPropogator[sx][RE]*host_psi[sx][IM] + VPropogator[sx][IM]*host_psi[sx][RE]; host_psi[sx][RE]= workPsi_Re; host_psi[sx][IM]= workPsi_Im; } } /* Half|Full Kinetic Energy Propogator Function */ void kinetic_Propogation(int stepSize) { //First apply periodic bounds periodic_Bounds(); if (usingCUDA) { hostToDevice(host_psi, dev_psi, MESH_SIZE); if (stepSize == FULL) { gpu_Kinetic_Prop<<<gridDim, blockDim>>>(dev_psi, dev_workPsi, dev_TDiag_full, dev_TLower_full, dev_TUpper_full); } else { gpu_Kinetic_Prop<<<gridDim, blockDim>>>(dev_psi, dev_workPsi, dev_TDiag_half, dev_TLower_half, dev_TUpper_half); } gpu_WorkToPsi<<<gridDim, blockDim>>>(dev_workPsi, dev_psi); deviceToHost(dev_psi, host_psi, MESH_SIZE); } else { regular_Kinetic_Prop(stepSize); } } __global__ void gpu_Kinetic_Prop(double *psi, double *work, double *al, double *blx, double *bux) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sx, s_Re, s_Im, l_Re, l_Im, u_Re, u_Im; double workPsi_Re, workPsi_Im; sx= tid+1; s_Re = 2*sx; s_Im = 2*sx+1; l_Re = 2*(sx-1); l_Im = 2*(sx-1)+1; u_Re = 2*(sx+1); u_Im = 2*(sx+1)+1; workPsi_Re = al[0]*psi[s_Re] - al[1]*psi[s_Im]; workPsi_Im = al[0]*psi[s_Im] + al[1]*psi[s_Re]; workPsi_Re += blx[s_Re]*psi[l_Re] - blx[s_Im]*psi[l_Im]; workPsi_Im += blx[s_Re]*psi[l_Im] + blx[s_Im]*psi[l_Re]; workPsi_Re += bux[s_Re]*psi[u_Re] - bux[s_Im]*psi[u_Im]; workPsi_Im += bux[s_Re]*psi[u_Im] + bux[s_Im]*psi[u_Re]; work[s_Re] = workPsi_Re; work[s_Im] = workPsi_Im; } void regular_Kinetic_Prop(int t) { int sx, c; double workPsi_Re, workPsi_Im; for (sx=1; sx<=NX; ++sx) { workPsi_Re = TDiag[t][RE]*host_psi[sx][RE] - TDiag[t][IM]*host_psi[sx][IM]; workPsi_Im = TDiag[t][RE]*host_psi[sx][IM] + TDiag[t][IM]*host_psi[sx][RE]; workPsi_Re += (TLower[t][sx][RE]*host_psi[sx-1][RE]) - (TLower[t][sx][IM]*host_psi[sx-1][IM]); workPsi_Im += (TLower[t][sx][RE]*host_psi[sx-1][IM]) + 
(TLower[t][sx][IM]*host_psi[sx-1][RE]); workPsi_Re += (TUpper[t][sx][RE]*host_psi[sx+1][RE]) - (TUpper[t][sx][IM]*host_psi[sx+1][IM]); workPsi_Im += (TUpper[t][sx][RE]*host_psi[sx+1][IM]) + (TUpper[t][sx][IM]*host_psi[sx+1][RE]); workPsi[sx][RE] = workPsi_Re; workPsi[sx][IM] = workPsi_Im; } for (sx=1; sx<=NX; ++sx) { for (c=0; c<2; ++c) { host_psi[sx][c] = workPsi[sx][c]; } } } /************************************************************************************* * HOST-DEVICE CONVERSION Functions ************************************************************************************/ __global__ void gpu_WorkToPsi(double *work, double *psi) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int sx, s_Re, s_Im; sx= tid+1; s_Re = 2*sx; s_Im = 2*sx+1; psi[s_Re] = work[s_Re]; psi[s_Im] = work[s_Im]; } void hostToDevice(double** host, double* device, int size) { int i,j; double *hostBuf; hostBuf = (double*) malloc(sizeof(double) * size * 2); for (i=0; i<size; ++i) { for (j=0; j<2; ++j) { hostBuf[2*i + j] = host[i][j]; } } cudaMemcpy((void*) device, hostBuf, 2*size*sizeof(double), cudaMemcpyHostToDevice); free(hostBuf); } void deviceToHost(double* device, double** host, int size) { int i,j; double *devBuf; devBuf = (double*) malloc(sizeof(double) * size * 2); cudaMemcpy((void*) devBuf, device, 2*size*sizeof(double), cudaMemcpyDeviceToHost); for (i=0; i<size; ++i) { for (j=0; j<2; ++j) { host[i][j] = devBuf[2*i + j]; } } free(devBuf); } /************************************************************************************** * MPI Related Functions *************************************************************************************/ /* Applies Periodic boundary conditions */ void periodic_Bounds() { int neighbor[2]; double sendBuf[2], recvBuf[2]; neighbor[right] = (myid + 1) % nProc; neighbor[left] = (myid - 1 + nProc) % nProc; // Send right, receive left sendBuf[0] = host_psi[NX][RE]; sendBuf[1] = host_psi[NX][IM]; MPI_Irecv(recvBuf, 2, MPI_DOUBLE, neighbor[left], DAEMON, MPI_COMM_WORLD, &request); MPI_Send(sendBuf, 2, MPI_DOUBLE, neighbor[right], DAEMON, MPI_COMM_WORLD); MPI_Wait(&request, &status); host_psi[0][RE] = recvBuf[RE]; host_psi[0][IM] = recvBuf[IM]; // Send left, receive right sendBuf[0] = host_psi[1][RE]; sendBuf[1] = host_psi[1][IM]; MPI_Irecv(recvBuf, 2, MPI_DOUBLE, neighbor[right], DAEMON+1, MPI_COMM_WORLD, &request); MPI_Send(sendBuf, 2, MPI_DOUBLE, neighbor[left], DAEMON+1, MPI_COMM_WORLD); MPI_Wait(&request, &status); host_psi[NX+1][RE] = recvBuf[RE]; host_psi[NX+1][IM] = recvBuf[IM]; } /* Total Energy Calculation at the current time step */ void calculate_Energy() { int sx, c; double a, b; // Apply periodic boundary conditions periodic_Bounds(); // Tridiagonal KE operators a = 1.0 / (dx * dx); b = -0.5 / (dx * dx); // | work > = (-1/2) Laplacian | Psi > for (sx=1; sx<=NX; ++sx) for (c=0; c<2; ++c) workPsi[sx][c] = a*host_psi[sx][c] + b*(host_psi[sx-1][c] + host_psi[sx+1][c]); // Expected Value for Kinetic Energy expected[_T_] = 0.0; for (sx=1; sx<=NX; ++sx) expected[_T_] += (host_psi[sx][RE]*workPsi[sx][RE] + host_psi[sx][IM]*workPsi[sx][IM]); expected[_T_] *= dx; MPI_Allreduce(&expected[_T_], &expected[_T_], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); // Expected Value for Potential Energy expected[_V_] = 0.0; for (sx=1; sx<=NX; ++sx) expected[_V_] += potential[sx]*(host_psi[sx][RE]*host_psi[sx][RE] + host_psi[sx][IM]*host_psi[sx][IM]); expected[_V_] *= dx; MPI_Allreduce(&expected[_V_], &expected[_V_], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); 
MPI_Barrier(MPI_COMM_WORLD); // Expected total Energy expected[_E_] = expected[_T_] + expected[_V_]; }
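One note on the energy reductions above: both MPI_Allreduce calls in calculate_Energy pass the same address as the send and the receive buffer, which the MPI standard only permits through MPI_IN_PLACE. A minimal sketch of the in-place form, keeping the file's own variable names (the rest of the function is unchanged):

// Hedged fix sketch: use MPI_IN_PLACE so expected[_T_] / expected[_V_] may
// legally act as both input and output of the reduction.
MPI_Allreduce(MPI_IN_PLACE, &expected[_T_], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &expected[_V_], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);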
8a9d5ebec90016a92e9134df9fa32c4eb433388f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>   // for uint32_t
#include <hip/hip_runtime.h>
#include "../Stopwatch.hpp"

// #define CUDA_CALL(x){
//     const hipError_t a = (x);
//     if(a != hipSuccess){
//         printf("\nCUDA Errors: %s (err_num = %d)\n", hipGetErrorString(a), a);
//         hipDeviceReset();
//         assert(0);
//     }
// }

#define KERNEL_LOOP 65636
#define THREADS 128
#define BLOCKS 1024
#define N (THREADS * BLOCKS)

__global__ void const_test_gpu_literal(uint32_t * const data, const uint32_t num_elements){
    const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < num_elements){
        uint32_t d = tid;
        for(int i = 0; i < KERNEL_LOOP; i++){
            d ^= 0x55555555;
            d |= 0x77777777;
            d &= 0x33333333;
            d |= 0x11111111;
        }
        data[tid] = d;
    }
};

__constant__ static const uint32_t const_data_01 = 0x55555555;
__constant__ static const uint32_t const_data_02 = 0x77777777;
__constant__ static const uint32_t const_data_03 = 0x33333333;
__constant__ static const uint32_t const_data_04 = 0x11111111;

__global__ void const_test_gpu_const( uint32_t * const data, const uint32_t num_elements){
    const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(tid < num_elements){
        uint32_t d = tid;
        for(int i = 0; i < KERNEL_LOOP; i++ ){
            d ^= const_data_01;
            d |= const_data_02;
            d &= const_data_03;
            d |= const_data_04;
        }
        data[tid] = d;
    }
};

__device__ static uint32_t data_01 = 0x55555555;
__device__ static uint32_t data_02 = 0x77777777;
__device__ static uint32_t data_03 = 0x33333333;
__device__ static uint32_t data_04 = 0x11111111;

__global__ void const_test_gpu_gmem( uint32_t * const data, const uint32_t num_elements){
    const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(tid < num_elements){
        uint32_t d = tid;
        for(int i = 0; i < KERNEL_LOOP; i++ ){
            d ^= data_01;
            d |= data_02;
            d &= data_03;
            d |= data_04;
        }
        data[tid] = d;
    }
};

int main(int argc, char** argv){
    Stopwatch stopwatch;

    uint32_t* h_data = (uint32_t*) malloc(N * sizeof(uint32_t));
    uint32_t* h_result1 = (uint32_t*) malloc(N * sizeof(uint32_t));
    uint32_t* h_result2 = (uint32_t*) malloc(N * sizeof(uint32_t));
    uint32_t* h_result3 = (uint32_t*) malloc(N * sizeof(uint32_t));

    uint32_t* d_data;
    hipMalloc((void**)&d_data, N * sizeof(uint32_t));

    stopwatch.Start();
    hipMemcpy(d_data, h_data, N * sizeof(uint32_t), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( const_test_gpu_const) , dim3(BLOCKS), dim3(THREADS), 0, 0, d_data, N);
    hipDeviceSynchronize();
    hipMemcpy(h_result1, d_data, N * sizeof(uint32_t), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    stopwatch.Check_n_Reset("CONSTANT: ");

    stopwatch.Start();
    hipMemcpy(d_data, h_data, N * sizeof(uint32_t), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( const_test_gpu_literal) , dim3(BLOCKS), dim3(THREADS), 0, 0, d_data, N);
    hipDeviceSynchronize();
    hipMemcpy(h_result2, d_data, N * sizeof(uint32_t), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    stopwatch.Check_n_Reset("LITERAL: ");

    stopwatch.Start();
    hipMemcpy(d_data, h_data, N * sizeof(uint32_t), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( const_test_gpu_gmem) , dim3(BLOCKS), dim3(THREADS), 0, 0, d_data, N);
    hipDeviceSynchronize();
    hipMemcpy(h_result3, d_data, N * sizeof(uint32_t), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    stopwatch.Check_n_Reset("GLOBAL: ");

    printf("\n%d %d %d\n", h_result1[10], h_result2[10], h_result3[10]);

    hipFree(d_data);
    free(h_data);
    free(h_result1);
    free(h_result2);
    free(h_result3);

    return 0;
}
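The commented-out CUDA_CALL block at the top of this file sketches an error check but was never finished. A working HIP variant could look like the following; the macro name HIP_CALL, the do/while wrapper, and the assert.h include are assumptions, not part of the original source.

#include <assert.h>

// Sketch: evaluate a HIP runtime call once, report and abort on failure.
#define HIP_CALL(x) do {                                            \
    const hipError_t a = (x);                                       \
    if (a != hipSuccess) {                                          \
        printf("\nHIP error: %s (err_num = %d)\n",                  \
               hipGetErrorString(a), (int)a);                       \
        hipDeviceReset();                                           \
        assert(0);                                                  \
    }                                                               \
} while (0)

/* e.g.  HIP_CALL(hipMalloc((void**)&d_data, N * sizeof(uint32_t))); */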
8a9d5ebec90016a92e9134df9fa32c4eb433388f.cu
#include <stdio.h>
#include <stdint.h>   // for uint32_t
#include <cuda.h>
#include "../Stopwatch.hpp"

// #define CUDA_CALL(x){
//     const cudaError_t a = (x);
//     if(a != cudaSuccess){
//         printf("\nCUDA Errors: %s (err_num = %d)\n", cudaGetErrorString(a), a);
//         cudaDeviceReset();
//         assert(0);
//     }
// }

#define KERNEL_LOOP 65636
#define THREADS 128
#define BLOCKS 1024
#define N (THREADS * BLOCKS)

__global__ void const_test_gpu_literal(uint32_t * const data, const uint32_t num_elements){
    const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < num_elements){
        uint32_t d = tid;
        for(int i = 0; i < KERNEL_LOOP; i++){
            d ^= 0x55555555;
            d |= 0x77777777;
            d &= 0x33333333;
            d |= 0x11111111;
        }
        data[tid] = d;
    }
};

__constant__ static const uint32_t const_data_01 = 0x55555555;
__constant__ static const uint32_t const_data_02 = 0x77777777;
__constant__ static const uint32_t const_data_03 = 0x33333333;
__constant__ static const uint32_t const_data_04 = 0x11111111;

__global__ void const_test_gpu_const( uint32_t * const data, const uint32_t num_elements){
    const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(tid < num_elements){
        uint32_t d = tid;
        for(int i = 0; i < KERNEL_LOOP; i++ ){
            d ^= const_data_01;
            d |= const_data_02;
            d &= const_data_03;
            d |= const_data_04;
        }
        data[tid] = d;
    }
};

__device__ static uint32_t data_01 = 0x55555555;
__device__ static uint32_t data_02 = 0x77777777;
__device__ static uint32_t data_03 = 0x33333333;
__device__ static uint32_t data_04 = 0x11111111;

__global__ void const_test_gpu_gmem( uint32_t * const data, const uint32_t num_elements){
    const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(tid < num_elements){
        uint32_t d = tid;
        for(int i = 0; i < KERNEL_LOOP; i++ ){
            d ^= data_01;
            d |= data_02;
            d &= data_03;
            d |= data_04;
        }
        data[tid] = d;
    }
};

int main(int argc, char** argv){
    Stopwatch stopwatch;

    uint32_t* h_data = (uint32_t*) malloc(N * sizeof(uint32_t));
    uint32_t* h_result1 = (uint32_t*) malloc(N * sizeof(uint32_t));
    uint32_t* h_result2 = (uint32_t*) malloc(N * sizeof(uint32_t));
    uint32_t* h_result3 = (uint32_t*) malloc(N * sizeof(uint32_t));

    uint32_t* d_data;
    cudaMalloc((void**)&d_data, N * sizeof(uint32_t));

    stopwatch.Start();
    cudaMemcpy(d_data, h_data, N * sizeof(uint32_t), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    const_test_gpu_const <<<BLOCKS, THREADS>>> (d_data, N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result1, d_data, N * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    stopwatch.Check_n_Reset("CONSTANT: ");

    stopwatch.Start();
    cudaMemcpy(d_data, h_data, N * sizeof(uint32_t), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    const_test_gpu_literal <<<BLOCKS, THREADS>>> (d_data, N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result2, d_data, N * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    stopwatch.Check_n_Reset("LITERAL: ");

    stopwatch.Start();
    cudaMemcpy(d_data, h_data, N * sizeof(uint32_t), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    const_test_gpu_gmem <<<BLOCKS, THREADS>>> (d_data, N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result3, d_data, N * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    stopwatch.Check_n_Reset("GLOBAL: ");

    printf("\n%d %d %d\n", h_result1[10], h_result2[10], h_result3[10]);

    cudaFree(d_data);
    free(h_data);
    free(h_result1);
    free(h_result2);
    free(h_result3);

    return 0;
}
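Because all three kernels derive their output purely from the thread id and the same four masks, the CONSTANT, LITERAL and GLOBAL runs should produce identical buffers. A small cross-check sketch; the check() helper and its suggested placement at the end of main are additions, not part of the original benchmark.

#include <string.h>

// Compare two result buffers of N elements and report any divergence.
static void check(const uint32_t *a, const uint32_t *b, const char *label)
{
    if (memcmp(a, b, N * sizeof(uint32_t)) != 0)
        printf("Result mismatch: %s\n", label);
}

/* e.g. before the free() calls in main():
   check(h_result1, h_result2, "constant vs literal");
   check(h_result1, h_result3, "constant vs global");   */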
67767d18998ef64848a835e3fc43ca5b692ce1f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" using namespace cv::gpu; typedef unsigned char uchar; typedef unsigned short ushort; ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ float norm2(const float& v) { return v*v; } __device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; } __device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } __device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; } template<typename T, typename B> __global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type; const int i = blockDim.y * blockIdx.y + threadIdx.y; const int j = blockDim.x * blockIdx.x + threadIdx.x; if (j >= dst.cols || i >= dst.rows) return; int bsize = search_radius + block_radius; int search_window = 2 * search_radius + 1; float minus_search_window2_inv = -1.f/(search_window * search_window); value_type sum1 = VecTraits<value_type>::all(0); float sum2 = 0.f; if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows) { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx)); value_type av = saturate_cast<value_type>(src(i + ty, j + tx)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); /*if (i == 255 && j == 255) printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/ sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x)); sum2 += w; } } else { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src)); value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src)); sum2 += w; } } dst(i, j) = saturate_cast<T>(sum1 / sum2); } template<typename T, template <typename> class B> void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream) { dim3 block (32, 8); dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y)); B<T> b(src.rows, src.cols); int block_window = 2 * block_radius + 1; float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn); float noise_mult = minus_h2_inv/(block_window * block_window); cudaSafeCall( hipFuncSetCacheConfig (nlm_kernel<T, B<T> >, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( nlm_kernel), dim3(grid), dim3(block), 0, 0, 
(PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template<typename T> void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, hipStream_t stream) { typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream); static func_t funcs[] = { nlm_caller<T, BrdReflect101>, nlm_caller<T, BrdReplicate>, nlm_caller<T, BrdConstant>, nlm_caller<T, BrdReflect>, nlm_caller<T, BrdWrap>, }; funcs[borderMode](src, dst, search_radius, block_radius, h, stream); } template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); } }}} ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing (fast approximate version) namespace cv { namespace gpu { namespace device { namespace imgproc { template <int cn> struct Unroll; template <> struct Unroll<1> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2) { return thrust::tie(val1, val2); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op); } }; template <> struct Unroll<2> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2) { return thrust::tie(val1, val2.x, val2.y); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op); } }; template <> struct Unroll<3> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op); } }; template <> struct Unroll<4> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w); } static 
__device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op, op); } }; __device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); } __device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); } __device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); } template <class T> struct FastNonLocalMeans { enum { CTA_SIZE = 128, TILE_COLS = 128, TILE_ROWS = 32, STRIDE = CTA_SIZE }; struct plus { __device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; } }; int search_radius; int block_radius; int search_window; int block_window; float minus_h2_inv; FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2), search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {} PtrStep<T> src; mutable PtrStepi buffer; __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { dist_sums[index] = 0; for(int tx = 0; tx < block_window; ++tx) col_sums(tx, index) = 0; int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j; int by = i + y - search_radius; int bx = j + x - search_radius; #if 1 for (int tx = -block_radius; tx <= block_radius; ++tx) { int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sum += dist; } col_sums(tx + block_radius, index) = col_sum; } #else for (int ty = -block_radius; ty <= block_radius; ++ty) for (int tx = -block_radius; tx <= block_radius; ++tx) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sums(tx + block_radius, index) += dist; } #endif up_col_sums(j, index) = col_sums(block_window - 1, index); } } __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j + block_radius; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx)); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { int ay = i; int ax = j + block_radius; T a_up = src(ay - block_radius - 1, ax); T a_down = src(ay + block_radius, ax); for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; T b_up = src(by - block_radius - 1, bx); T b_down = src(by + block_radius, bx); int col_sum = up_col_sums(j, index) + calcDist(a_down, 
b_down) - calcDist(a_up, b_up); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type; float weights_sum = 0; sum_type sum = VecTraits<sum_type>::all(0); float bw2_inv = 1.f/(block_window * block_window); int sx = j - search_radius; int sy = i - search_radius; for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; float avg_dist = dist_sums[index] * bw2_inv; float weight = __expf(avg_dist * minus_h2_inv); weights_sum += weight; sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x)); } __shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)]; reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer), Unroll<VecTraits<T>::cn>::tie(weights_sum, sum), threadIdx.x, Unroll<VecTraits<T>::cn>::op()); if (threadIdx.x == 0) dst = saturate_cast<T>(sum / weights_sum); } __device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const { int tbx = blockIdx.x * TILE_COLS; int tby = blockIdx.y * TILE_ROWS; int tex = ::min(tbx + TILE_COLS, dst.cols); int tey = ::min(tby + TILE_ROWS, dst.rows); PtrStepi col_sums; col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window; col_sums.step = buffer.step; PtrStepi up_col_sums; up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window; up_col_sums.step = buffer.step; extern __shared__ int dist_sums[]; //search_window * search_window int first = 0; for (int i = tby; i < tey; ++i) for (int j = tbx; j < tex; ++j) { __syncthreads(); if (j == tbx) { initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums); first = 0; } else { if (i == tby) shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums); else shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums); first = (first + 1) % block_window; } __syncthreads(); convolve_window(i, j, dist_sums, dst(i, j)); } } }; template<typename T> __global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); } void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows) { typedef FastNonLocalMeans<uchar> FNLM; dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); buffer_cols = search_window * search_window * grid.y; buffer_rows = src.cols + block_window * grid.x; } template<typename T> void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer, int search_window, int block_window, float h, hipStream_t stream) { typedef FastNonLocalMeans<T> FNLM; FNLM fnlm(search_window, block_window, h); fnlm.src = (PtrStepSz<T>)src; fnlm.buffer = buffer; dim3 block(FNLM::CTA_SIZE, 1); dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); int smem = search_window * search_window * sizeof(int); hipLaunchKernelGGL(( fast_nlm_kernel), dim3(grid), dim3(block), smem, 0, fnlm, (PtrStepSz<T>)dst); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); template void 
nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); __global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar3 p = lab(y, x); ab(y,x) = make_uchar2(p.y, p.z); l(y,x) = p.x; } } void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, hipStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); hipLaunchKernelGGL(( fnlm_split_kernel), dim3(g), dim3(b), 0, 0, lab, l, ab); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } __global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar2 p = ab(y, x); lab(y, x) = make_uchar3(l(y, x), p.x, p.y); } } void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, hipStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); hipLaunchKernelGGL(( fnlm_merge_kernel), dim3(g), dim3(b), 0, 0, l, ab, lab); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } } }}} #endif /* CUDA_DISABLER */
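For reference, the weight computed in nlm_kernel above combines a patch-distance term scaled by -1/(h^2 * cn * block_window^2) with a spatial falloff scaled by -1/search_window^2. A host-side sketch of the same arithmetic, with names mirroring the kernel and nlm_caller; the standalone nlm_weight function is illustrative only, not an OpenCV API.

#include <cmath>

// Rebuilds the constants set up in nlm_caller and applies the weight line
// from nlm_kernel for one search offset (x, y) with patch distance dist2.
inline float nlm_weight(float dist2, float x, float y, float h, int cn,
                        int block_radius, int search_radius)
{
    const int   block_window  = 2 * block_radius + 1;
    const int   search_window = 2 * search_radius + 1;
    const float minus_h2_inv  = -1.f / (h * h * cn);
    const float noise_mult    = minus_h2_inv / (block_window * block_window);
    const float minus_search_window2_inv = -1.f / (search_window * search_window);
    return std::exp(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv);
}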
67767d18998ef64848a835e3fc43ca5b692ce1f2.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/reduce.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" using namespace cv::gpu; typedef unsigned char uchar; typedef unsigned short ushort; ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ float norm2(const float& v) { return v*v; } __device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; } __device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } __device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; } template<typename T, typename B> __global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type; const int i = blockDim.y * blockIdx.y + threadIdx.y; const int j = blockDim.x * blockIdx.x + threadIdx.x; if (j >= dst.cols || i >= dst.rows) return; int bsize = search_radius + block_radius; int search_window = 2 * search_radius + 1; float minus_search_window2_inv = -1.f/(search_window * search_window); value_type sum1 = VecTraits<value_type>::all(0); float sum2 = 0.f; if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows) { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx)); value_type av = saturate_cast<value_type>(src(i + ty, j + tx)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); /*if (i == 255 && j == 255) printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/ sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x)); sum2 += w; } } else { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src)); value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src)); sum2 += w; } } dst(i, j) = saturate_cast<T>(sum1 / sum2); } template<typename T, template <typename> class B> void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream) { dim3 block (32, 8); dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y)); B<T> b(src.rows, src.cols); int block_window = 2 * block_radius + 1; float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn); float noise_mult = minus_h2_inv/(block_window * block_window); cudaSafeCall( cudaFuncSetCacheConfig (nlm_kernel<T, B<T> >, cudaFuncCachePreferL1) ); nlm_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, 
b, search_radius, block_radius, noise_mult); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template<typename T> void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream) { typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream); static func_t funcs[] = { nlm_caller<T, BrdReflect101>, nlm_caller<T, BrdReplicate>, nlm_caller<T, BrdConstant>, nlm_caller<T, BrdReflect>, nlm_caller<T, BrdWrap>, }; funcs[borderMode](src, dst, search_radius, block_radius, h, stream); } template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); } }}} ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing (fast approximate version) namespace cv { namespace gpu { namespace device { namespace imgproc { template <int cn> struct Unroll; template <> struct Unroll<1> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&> tie(float& val1, float& val2) { return thrust::tie(val1, val2); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op); } }; template <> struct Unroll<2> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&> tie(float& val1, float2& val2) { return thrust::tie(val1, val2.x, val2.y); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op); } }; template <> struct Unroll<3> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&> tie(float& val1, float3& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z); } static __device__ __forceinline__ const thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op); } }; template <> struct Unroll<4> { template <int BLOCK_SIZE> static __device__ __forceinline__ thrust::tuple<volatile float*, volatile float*, volatile float*, volatile float*, volatile float*> smem_tuple(float* smem) { return cv::gpu::device::smem_tuple(smem, smem + BLOCK_SIZE, smem + 2 * BLOCK_SIZE, smem + 3 * BLOCK_SIZE, smem + 4 * BLOCK_SIZE); } static __device__ __forceinline__ thrust::tuple<float&, float&, float&, float&, float&> tie(float& val1, float4& val2) { return thrust::tie(val1, val2.x, val2.y, val2.z, val2.w); } static __device__ __forceinline__ const 
thrust::tuple<plus<float>, plus<float>, plus<float>, plus<float>, plus<float> > op() { plus<float> op; return thrust::make_tuple(op, op, op, op, op); } }; __device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); } __device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); } __device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); } template <class T> struct FastNonLocalMeans { enum { CTA_SIZE = 128, TILE_COLS = 128, TILE_ROWS = 32, STRIDE = CTA_SIZE }; struct plus { __device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; } }; int search_radius; int block_radius; int search_window; int block_window; float minus_h2_inv; FastNonLocalMeans(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2), search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {} PtrStep<T> src; mutable PtrStepi buffer; __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { dist_sums[index] = 0; for(int tx = 0; tx < block_window; ++tx) col_sums(tx, index) = 0; int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j; int by = i + y - search_radius; int bx = j + x - search_radius; #if 1 for (int tx = -block_radius; tx <= block_radius; ++tx) { int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sum += dist; } col_sums(tx + block_radius, index) = col_sum; } #else for (int ty = -block_radius; ty <= block_radius; ++ty) for (int tx = -block_radius; tx <= block_radius; ++tx) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sums(tx + block_radius, index) += dist; } #endif up_col_sums(j, index) = col_sums(block_window - 1, index); } } __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j + block_radius; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx)); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { int ay = i; int ax = j + block_radius; T a_up = src(ay - block_radius - 1, ax); T a_down = src(ay + block_radius, ax); for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; T b_up = src(by - block_radius - 1, bx); T b_down = src(by + block_radius, bx); int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up); 
dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, T& dst) const { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type; float weights_sum = 0; sum_type sum = VecTraits<sum_type>::all(0); float bw2_inv = 1.f/(block_window * block_window); int sx = j - search_radius; int sy = i - search_radius; for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; float avg_dist = dist_sums[index] * bw2_inv; float weight = __expf(avg_dist * minus_h2_inv); weights_sum += weight; sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x)); } __shared__ float cta_buffer[CTA_SIZE * (VecTraits<T>::cn + 1)]; reduce<CTA_SIZE>(Unroll<VecTraits<T>::cn>::template smem_tuple<CTA_SIZE>(cta_buffer), Unroll<VecTraits<T>::cn>::tie(weights_sum, sum), threadIdx.x, Unroll<VecTraits<T>::cn>::op()); if (threadIdx.x == 0) dst = saturate_cast<T>(sum / weights_sum); } __device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const { int tbx = blockIdx.x * TILE_COLS; int tby = blockIdx.y * TILE_ROWS; int tex = ::min(tbx + TILE_COLS, dst.cols); int tey = ::min(tby + TILE_ROWS, dst.rows); PtrStepi col_sums; col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window; col_sums.step = buffer.step; PtrStepi up_col_sums; up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window; up_col_sums.step = buffer.step; extern __shared__ int dist_sums[]; //search_window * search_window int first = 0; for (int i = tby; i < tey; ++i) for (int j = tbx; j < tex; ++j) { __syncthreads(); if (j == tbx) { initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums); first = 0; } else { if (i == tby) shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums); else shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums); first = (first + 1) % block_window; } __syncthreads(); convolve_window(i, j, dist_sums, dst(i, j)); } } }; template<typename T> __global__ void fast_nlm_kernel(const FastNonLocalMeans<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); } void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows) { typedef FastNonLocalMeans<uchar> FNLM; dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); buffer_cols = search_window * search_window * grid.y; buffer_rows = src.cols + block_window * grid.x; } template<typename T> void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer, int search_window, int block_window, float h, cudaStream_t stream) { typedef FastNonLocalMeans<T> FNLM; FNLM fnlm(search_window, block_window, h); fnlm.src = (PtrStepSz<T>)src; fnlm.buffer = buffer; dim3 block(FNLM::CTA_SIZE, 1); dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); int smem = search_window * search_window * sizeof(int); fast_nlm_kernel<<<grid, block, smem>>>(fnlm, (PtrStepSz<T>)dst); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, 
float, cudaStream_t); __global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar3 p = lab(y, x); ab(y,x) = make_uchar2(p.y, p.z); l(y,x) = p.x; } } void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, cudaStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); fnlm_split_kernel<<<g, b>>>(lab, l, ab); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar2 p = ab(y, x); lab(y, x) = make_uchar3(l(y, x), p.x, p.y); } } void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, cudaStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); fnlm_merge_kernel<<<g, b>>>(l, ab, lab); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } }}} #endif /* CUDA_DISABLER */
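The fast path above never re-sums a whole block per search position: shiftRight_FirstRow and shiftRight_UpSums keep one running window sum plus a ring buffer of per-column sums. A scalar sketch of that update step; slide_right and its signature are illustrative, not taken from the kernel.

#include <vector>

// One sliding-window step for a single search position: drop the oldest
// column sum, add the newly computed one, and advance the ring-buffer head.
float slide_right(float window_sum, std::vector<float>& col_sums,
                  int& first, float new_col_sum)
{
    window_sum     += new_col_sum - col_sums[first];
    col_sums[first] = new_col_sum;
    first = (first + 1) % static_cast<int>(col_sums.size());
    return window_sum;
}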
3a13b54cad1bfa19482230d720619b0338a0bd97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* David and Mikaela SYNC 6 May 2017 This is the final code for our math modeling class project. N is the number of fish. Tested up to N=2048. Make sure the THREADSPERBLOCK divides evenly into N, else problems will occur. */ //nvcc final_fish_snyc.cu -o final_fish_snyc -lglut -lm -lGLU -lGL && ./final_fish_snyc // Header Files to include: #include <GL/glut.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> // Constants for Math: #define EPSILON 0.000001 #define PI 3.1415926535 // Constants for the Window/Drawing #define EYEZ 50.0 // Sets x and y coordinates between -EYEZ and EYEZ #define DRAW 10 // Draw every 10 timesteps #define XWINDOWSIZE 800 // How many pixels in x direction #define YWINDOWSIZE 800 // How many pixels in y direction #define DIM 3 // Are we in 2D or 3D #define DT 0.005 // Time step #define STOP_TIME 20.0 // How long to go //#define STOP_TIME 0.005 // Constants for Force Calculations #define SIGHT 10.0 // How far the fish can 'see' #define WA 2.0 // Attraction Weight Ratio #define WD 50.0 // Directional Weight Ratio #define CA 15.0 // Attraction Coefficient #define CR 60.0 // Repulsion Coefficient #define CTA 5000.0 // Attraction Coefficient (Target) //#define CPR 100000000.0 // Repulsion Coefficient (predator) // Constants for Food #define P 100 //Max Number of food particles #define FOODRAD 1.0 float foodrad[P]; // Food radius may change over time. #define FOODR 0.529412 // Red of target color #define FOODG 0.807843 // Green of target color #define FOODB 0.921569 // Blue of target color // Constants for GPU #define THREADSPERBLOCK 12 // Number of threads per block // Constants for Fishies #define N 12 // Number of fish #define FISHRAD 0.4 // Radius of fish spheres #define FISHR 1.0 // 1.0 Red of fish color #define FISHG 0.3 // 0.8 Green of fish color #define FISHB 0.3 // 0.5 Blue of fish color #define FISHTAIL 3.0 // How wide tail should be #define MINSPEED 0.1 // Slowest possible speed #define MAXSPEED 25.0 // Fastest allowable speed // Constants for Wall Calculations #define BOUNDARY EYEZ-1.0 // Walls #define BDIST 10.0 // Distance from boundary at which curving should start. #define WALLSTRENGTH MAXSPEED*100.0 // How strong our walls should be // Global Variables float4 p[N]; // positions for fish, float3 v[N]; // velocities for fish, float3 f[N]; // forces for fish, float4 p_food[P]; // positions for food float3 v_food[P], f_food[P]; // velocity and force on food int fishCounter[N]; // Counter to hold how many fish are within sight radius of each fish //Globals for GPU float4 *p_GPU; float3 *v_GPU, *f_GPU; float4 *p_food_GPU; float3 *v_food_GPU, *f_food_GPU; int *fishCounter_GPU; dim3 block, grid; double SPEED = 10.0; // Multiplier for speed - LOOK INTO THIS. 
IT SUCKS AND FUCK IT double TIMERUNNING = 0.0; // Stores how long the nBody code has been running int PAUSE = 0; // hold whether we are pausing simulatin int FOODPOINTER = 0; // where does the first target particle start at double STARTRAD = 48.0; //how big of a box do we want to start the fish in void initializeBodies() { // A function to initialize position, velocity, and force of fish, int i; // Initializing Fish for(i=0; i<N; i++) { // Start the fish at random positions p[i].x = (((float)rand()/(float)RAND_MAX)-0.5)*STARTRAD; p[i].y = (((float)rand()/(float)RAND_MAX)-0.5)*STARTRAD; if(DIM == 3) { p[i].z = (((float)rand()/(float)RAND_MAX)*20.0)-30.0; // z positions between -30 and -10 } else { p[i].z = -20.0; } // Set mass to 1 - can change later if needed, but this is fine for now p[i].w = 1.0; // Set starting velocity to 0 v[i].x = 0.0; v[i].y = 25.0; v[i].z = 0.0; // Set starting force to 0 f[i].x = 0.0; f[i].y = 0.0; f[i].z = 0.0; } // Initialize Targets for(i=0; i<P; i++) { p_food[i].x = 10000.0; p_food[i].y = 0.0; p_food[i].z = 0.0; p[i].w = 1.0; // Set mass to 1 // Starting radius foodrad[i] = FOODRAD; // Set starting velocity to 0 v_food[i].x = 0.0; v_food[i].y = 0.0; v_food[i].z = -2.0; /// How fast the food will sink after it's dropped } block.x = THREADSPERBLOCK; block.y = 1; block.z = 1; grid.x = (N-1)/block.x + 1; grid.y = 1; grid.z = 1; hipMalloc( (void**)&p_GPU, N*sizeof(float4) ); hipMalloc( (void**)&v_GPU, N*sizeof(float3) ); hipMalloc( (void**)&f_GPU, N*sizeof(float3) ); hipMalloc( (void**)&p_food_GPU, P*sizeof(float4) ); hipMalloc( (void**)&v_food_GPU, P*sizeof(float3) ); hipMalloc( (void**)&f_food_GPU, P*sizeof(float3) ); hipMalloc( (void**)&fishCounter_GPU, N*sizeof(int) ); // Copy memory over to GPU for the first and only time hipMemcpy(p_GPU, p, N*sizeof(float4), hipMemcpyHostToDevice); hipMemcpy(v_GPU, v, N*sizeof(float3), hipMemcpyHostToDevice); hipMemcpy(f_GPU, f, N*sizeof(float3), hipMemcpyHostToDevice); hipMemcpy(p_food_GPU, p_food, P*sizeof(float4), hipMemcpyHostToDevice); hipMemcpy(v_food_GPU, v_food, P*sizeof(float3), hipMemcpyHostToDevice); hipMemcpy(f_food_GPU, f_food, P*sizeof(float3), hipMemcpyHostToDevice); hipMemcpy(fishCounter_GPU, fishCounter, N*sizeof(int), hipMemcpyHostToDevice); } void drawLines() { // Draw back lines glLineWidth(6.0); glColor3f(1.0, 1.0, 1.0); //glColor3f(0.9, 0.9, 0.9); glBegin(GL_LINES); glVertex3f(-50.0, -50.0, -40.0); glVertex3f( 50.0, -50.0, -40.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, -50.0, -40.0); glVertex3f( 50.0, 50.0, -40.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, 50.0, -40.0); glVertex3f(-50.0, 50.0, -40.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, 50.0, -40.0); glVertex3f(-50.0, -50.0, -40.0); glEnd(); // End lines // Draw side lines glLineWidth(10.0); //glColor3f(0.9, 0.9, 0.9); glColor3f(1.0, 1.0, 1.0); glBegin(GL_LINES); glVertex3f( 50.0, 50.0, -40.0); glVertex3f( 50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, -50.0, -40.0); glVertex3f( 50.0, -50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, 50.0, -40.0); glVertex3f(-50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, -50.0, -40.0); glVertex3f(-50.0, -50.0, - 0.0); glEnd(); // Draw front lines glLineWidth(5.0); glColor3f(1.0, 0.0, 0.0); glBegin(GL_LINES); glVertex3f(-50.0, -50.0, - 0.0); glVertex3f( 50.0, -50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, -50.0, - 0.0); glVertex3f( 50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, 50.0, - 0.0); glVertex3f(-50.0, 50.0, - 
0.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, 50.0, - 0.0); glVertex3f(-50.0, -50.0, - 0.0); glEnd(); // End lines // End lines } void drawPicture() { glClear(GL_COLOR_BUFFER_BIT); glClear(GL_DEPTH_BUFFER_BIT); drawLines(); int i; // Drawing the fish for(i=0; i<N; i++) { float velMag = sqrt(v[i].x*v[i].x + v[i].y*v[i].y + v[i].z*v[i].z); glColor3d(FISHR, FISHG, FISHB); //Object color glLineWidth(FISHTAIL); // How wide should the tail line be glBegin(GL_LINES); glVertex3f(p[i].x, p[i].y, p[i].z); glVertex3f( p[i].x - (v[i].x/velMag), p[i].y - (v[i].y/velMag), p[i].z - (v[i].z/velMag) ); glEnd(); glPushMatrix(); glTranslatef(p[i].x, p[i].y, p[i].z); glutSolidSphere(FISHRAD, 10, 10); //First argument affects size. glPopMatrix(); } // Drawing the food for(i=0; i<P; i++) { //float velMag = sqrt(v_food[i].x*v[i].x + v[i].y*v[i].y + v[i].z*v[i].z); glColor3d(FOODR, FOODG, FOODB); //Object color glPushMatrix(); glTranslatef(p_food[i].x, p_food[i].y, p_food[i].z); glutSolidSphere(foodrad[i], 10, 10); //First argument affects size. glPopMatrix(); } glutSwapBuffers(); //makePicture("record1",1000,1000,1);//Call this after glutSwapBuffers. } void countFish(float4 *p) { for(int i = 0; i < N; i++) { fishCounter[i] = 0; for(int j=0; j < N; j++) { float3 d; d.x = p[j].x-p[i].x; d.y = p[j].y-p[i].y; d.z = p[j].z-p[i].z; float r2 = d.x*d.x + d.y*d.y + d.z*d.z; float r = sqrt(r2) + EPSILON; if(r < SIGHT) { fishCounter[i]++; } } //printf("There are %d fish in sight radius of fish %d\n", fishCounter[i], i); } } float3 getFishForces(float4 p0, float4 p1, float3 v1, int fishCounter) { // A function to calcultae the forces between float3 f, d; d.x = p1.x - p0.x; d.y = p1.y -p0.y; d.z = p1.z -p0.z; float r2 = d.x*d.x + d.y*d.y + d.z*d.z; float r = sqrt(r2) + EPSILON; float r4 = r2*r2 + EPSILON; //printf("There is a distance of %.2f between 2 fish\n", r); float vMag = sqrt(v1.x*v1.x + v1.y*v1.y + v1.z*v1.z); if(r < SIGHT) { f.x = WA*(CA*(d.x/r2) - CR*(d.x/r4)) + WD*(v1.x/(r*vMag))/(float)fishCounter; f.y = WA*(CA*(d.y/r2) - CR*(d.y/r4)) + WD*(v1.y/(r*vMag))/(float)fishCounter; f.z = WA*(CA*(d.z/r2) - CR*(d.z/r4)) + WD*(v1.z/(r*vMag))/(float)fishCounter; } else { f.x = 0.0; f.y = 0.0; f.z = 0.0; } return(f); } __device__ float3 getTargForces(float4 p0, float4 ptarg) { float3 f; float dx = ptarg.x - p0.x; float dy = ptarg.y - p0.y; float dz = ptarg.z - p0.z; float r2 = dx*dx + dy*dy + dz*dz + EPSILON; float r = sqrt(r2) + EPSILON; if(r < SIGHT*3.0) { f.x = CTA*dx/r2; f.y = CTA*dy/r2; f.z = CTA*dz/r2; } else { f.x = 0.0; f.y = 0.0; f.z = 0.0; } return(f); } __device__ float3 getWallForces(float4 p0, int dim) { float3 f; f.x = 0.0; f.y = 0.0; f.z = 0.0; float wallStart = BOUNDARY - BDIST; //BOUNDARY for x and y direction is 49, BDIST is 10 //wallStart is 39 in x and y direction // Right Wall if(p0.x > wallStart){f.x -= WALLSTRENGTH/(BOUNDARY - p0.x);} // Left Wall if(p0.x < -wallStart){f.x += WALLSTRENGTH/(BOUNDARY + p0.x);} // Top Wall if(p0.y > wallStart){f.y -= WALLSTRENGTH/(BOUNDARY - p0.y);} // Bottom Wall if(p0.y < -wallStart){f.y += WALLSTRENGTH/(BOUNDARY + p0.y);} if(dim == 3) { // Front Wall if(p0.z > -11.0){f.z -= WALLSTRENGTH/(-1.0 - p0.z);} // Back Wall if(p0.z < -29.0){f.z += WALLSTRENGTH/(40.0 + p0.z);} } return(f); } __device__ float3 getFishForcesDevice(float4 p0, float4 p1, float3 v1, int fishCounter) { float3 f; float dx = p1.x - p0.x; float dy = p1.y - p0.y; float dz = p1.z - p0.z; float r2 = dx*dx + dy*dy + dz*dz + EPSILON; float r = sqrt(r2) + EPSILON; float r4 = r2*r2 + EPSILON; // Check 
that we're within the sight radius if(r < SIGHT) { f.x = WA*(CA*(dx/r2) - CR*(dx/r4)) + WD*(v1.x/r)/(float)fishCounter; // In some versions, WD is divided by vMag. f.y = WA*(CA*(dy/r2) - CR*(dy/r4)) + WD*(v1.y/r)/(float)fishCounter; f.z = WA*(CA*(dz/r2) - CR*(dz/r4)) + WD*(v1.z/r)/(float)fishCounter; } else // If not, this particular fish does not affect the forces on p0 { f.x = 0.0; f.y = 0.0; f.z = 0.0; } return(f); } __device__ int getRadiusNumberDevice(float4 p0, float4 p1) { int fishCounter = 0; float dx = p1.x - p0.x; float dy = p1.y - p0.y; float dz = p1.z - p0.z; float r2 = dx*dx + dy*dy + dz*dz + EPSILON; float r = sqrt(r2) + EPSILON; if(r < SIGHT) { fishCounter ++; } return(fishCounter); } __global__ void getAllForcesKernel(float4 *p, float3 *v, float3 *f, int *fishCounter, float4 *p_food) { int i, j, ii; int id = threadIdx.x + blockDim.x*blockIdx.x; float3 forceSum0, forceMag, forceTarg, forceWall; float4 posMe; int cntMe; //int numberFishies; __shared__ float4 shPos[THREADSPERBLOCK]; __shared__ float3 shVel[THREADSPERBLOCK]; forceSum0.x = 0.0; forceSum0.y = 0.0; forceSum0.z = 0.0; posMe.x = p[id].x; posMe.y = p[id].y; posMe.z = p[id].z; posMe.w = p[id].w; cntMe = fishCounter[id]; for(j=0; j<gridDim.x; j++) { shPos[threadIdx.x] = p[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = v[threadIdx.x + blockDim.x*j]; //^^^Wyatt's code has blockDim.x, we have blockIdx.x __syncthreads(); #pragma unroll 32 for(i=0; i<blockDim.x; i++) { ii = i + blockDim.x*j; if(ii != id) { forceMag = getFishForcesDevice(posMe, shPos[i], shVel[i], cntMe); forceSum0.x += forceMag.x; forceSum0.y += forceMag.y; forceSum0.z += forceMag.z; } __syncthreads(); } __syncthreads(); } for(i=0; i<P; i++) { forceTarg = getTargForces(posMe, p_food[i]); forceSum0.x += forceTarg.x; forceSum0.y += forceTarg.y; forceSum0.z += forceTarg.z; __syncthreads(); } forceWall = getWallForces(posMe,DIM); forceSum0.x += forceWall.x; forceSum0.y += forceWall.y; forceSum0.z += forceWall.z; __syncthreads(); f[id].x = forceSum0.x; f[id].y = forceSum0.y; f[id].z = forceSum0.z; __syncthreads(); } void sinkFood(float4 *p, float3 *v, float3 *f, float4 *p_fish) { int i; for(i=0; i<P; i++) { // Update velocity and force. Currently not necessary. 
/* v[i].x += f[i].x*DT; v[i].y += f[i].y*DT; v[i].z += f[i].z*DT; float vMag = sqrt(v[i].x*v[i].x + v[i].y*v[i].y + v[i].z*v[i].z); if(vMag > MAXSPEED) { v[i].x *= (MAXSPEED/vMag); v[i].y *= (MAXSPEED/vMag); v[i].z *= (MAXSPEED/vMag); */ p[i].x += v[i].x*DT; p[i].y += v[i].y*DT; p[i].z += v[i].z*DT; // Iterate through fish to see which are eating for(int j = 0; j < N; j++) { float3 dist; dist.x = p_fish[j].x - p[i].x; dist.y = p_fish[j].y - p[i].y; dist.z = p_fish[j].z - p[i].z; float distance = sqrt(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z) + EPSILON; if(distance < 1.5) { printf("Fish %i is eating food %i\n", j, i); printf("Fish %d has a position of (%.2f, %.2f, %.2f)\n", j, p_fish[j].x, p[j].y, p[j].z); printf("Food %d has a position of (%.2f, %.2f, %.2f)\n", i, p[i].x, p_food[i].y, p_food[i].z); foodrad[i] /= 1.1; if(foodrad[i]<0.1) { p_food[i].x = 1000.0; } } } //printf("Food %d has a velocity of (%.2f, %.2f, %.2f)\n", i, v[i].x, v[i].y, v[i].z); //printf("Food %d has a position of (%.2f, %.2f, %.2f)\n", i, p[i].x, p[i].y, p[i].z); } } __global__ void swimFishKernel(float4 *p, float3 *v, float3 *f) { int id = threadIdx.x + blockDim.x*blockIdx.x; v[id].x += f[id].x*DT; v[id].y += f[id].y*DT; v[id].z += f[id].z*DT; float vMag = sqrt(v[id].x*v[id].x + v[id].y*v[id].y + v[id].z*v[id].z); if(vMag > MAXSPEED) { v[id].x *= (MAXSPEED/vMag); v[id].y *= (MAXSPEED/vMag); v[id].z *= (MAXSPEED/vMag); } //printf("Fish %d has a velocity of (%.2f, %.2f, %.2f)\n", id, v[id].x, v[id].y, v[id].z); p[id].x += v[id].x*DT; p[id].y += v[id].y*DT; p[id].z += v[id].z*DT; } void nBody() { hipError_t err; countFish(p); // This is now done on the CPU instead of the GPU sinkFood(p_food, v_food, f_food, p); hipMemcpy(fishCounter_GPU, fishCounter, N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(p_food_GPU, p_food, P*sizeof(float4), hipMemcpyHostToDevice); hipLaunchKernelGGL(( getAllForcesKernel), dim3(grid), dim3(block), 0, 0, p_GPU, v_GPU, f_GPU, fishCounter_GPU, p_food_GPU); hipLaunchKernelGGL(( swimFishKernel), dim3(grid), dim3(block), 0, 0, p_GPU, v_GPU, f_GPU); hipMemcpy(p, p_GPU, N*sizeof(float4), hipMemcpyDeviceToHost); hipMemcpy(v, v_GPU, N*sizeof(float3), hipMemcpyDeviceToHost); hipMemcpy(f, f_GPU, N*sizeof(float3), hipMemcpyDeviceToHost); for(int i=0; i<10; i++) { //printf("Fish %d has a force of (%.2f, %.2f, %.2f)\n", i, f[i].x, f[i].y, f[i].z); //printf("Fish %d has a velocity of (%.2f, %.2f, %.2f)\n", i, v[i].x, v[i].y, v[i].z); //printf("Fish %d has a position of (%.2f, %.2f, %.2f)\n", i, p[i].x, p[i].y, p[i].z); } err = hipGetLastError(); if (err != 0) { printf("\n CUDA error = %s\n", hipGetErrorString(err)); //return(1); } } void keyboardFunc( unsigned char key, int x, int y ) { int i,j; switch(key) { case 'q': exit(1); case ' ': j=0; for(i=0;i<N;i++) { if(abs((int)p[i].x) + abs((int)p[i].y) + abs((int)p[i].z) < EYEZ*3.0) { j++; } } printf("There are %i fish remaining\n", j); PAUSE++; if(PAUSE == 2) { PAUSE = 0; } } } void mouseFunc( int button, int state, int x, int y ) { double coord[3]; if( button == GLUT_LEFT_BUTTON ) { if( state == GLUT_DOWN && PAUSE == 0) // when left mouse button goes down. 
{ //printf("FOODPOINTER is %i \n", FOODPOINTER); coord[0] = (x*EYEZ*2.0/XWINDOWSIZE)-EYEZ; coord[1] = -(y*EYEZ*2.0/YWINDOWSIZE)+EYEZ; coord[2] = -1.0; printf("Food %i is at (%.4f, %.4f, %.4f)\n", FOODPOINTER, coord[0], coord[1], coord[2]); p_food[FOODPOINTER].x = coord[0]; p_food[FOODPOINTER].y = coord[1]; p_food[FOODPOINTER].z = coord[2]; foodrad[FOODPOINTER] = FOODRAD; //Change pointer to next food particle FOODPOINTER++; if(FOODPOINTER == P) { FOODPOINTER = 0; } } } } void update(int value) { if(TIMERUNNING < STOP_TIME){ if(PAUSE == 0) { nBody(); } } glutKeyboardFunc( keyboardFunc ); glutMouseFunc( mouseFunc ); glutPostRedisplay(); glutTimerFunc(1, update, 0); TIMERUNNING += DT; } void Display(void) { glClear(GL_COLOR_BUFFER_BIT); glClear(GL_DEPTH_BUFFER_BIT); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); drawPicture(); //glutSwapBuffers(); glFlush(); } void reshape(int w, int h) { glViewport(0, 0, (GLsizei) w, (GLsizei) h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, 150.0); glMatrixMode(GL_MODELVIEW); } int main(int argc, char** argv) { glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB); glutInitWindowSize(XWINDOWSIZE,YWINDOWSIZE); glutInitWindowPosition(0,0); glutCreateWindow(""); GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0}; GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0}; GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0}; GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0}; GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0}; GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0}; GLfloat mat_shininess[] = {50.0}; //glClearColor(0.8, 0.8, 1.0, 0.0); Light blue background glClearColor(0.0, 0.0, 0.2, 0.0); // Dark blue background glShadeModel(GL_SMOOTH); glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE); glLightfv(GL_LIGHT0, GL_POSITION, light_position); glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient); glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse); glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular); glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient); glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular); glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glEnable(GL_COLOR_MATERIAL); glEnable(GL_DEPTH_TEST); initializeBodies(); gluLookAt(0.0, 0.0, EYEZ, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0); glutDisplayFunc(Display); glutTimerFunc(16, update, 0); glutReshapeFunc(reshape); glutMainLoop(); return 0; }
3a13b54cad1bfa19482230d720619b0338a0bd97.cu
/* David and Mikaela SYNC 6 May 2017 This is the final code for our math modeling class project. N is the number of fish. Tested up to N=2048. Make sure the THREADSPERBLOCK divides evenly into N, else problems will occur. */ //nvcc final_fish_snyc.cu -o final_fish_snyc -lglut -lm -lGLU -lGL && ./final_fish_snyc // Header Files to include: #include <GL/glut.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> // Constants for Math: #define EPSILON 0.000001 #define PI 3.1415926535 // Constants for the Window/Drawing #define EYEZ 50.0 // Sets x and y coordinates between -EYEZ and EYEZ #define DRAW 10 // Draw every 10 timesteps #define XWINDOWSIZE 800 // How many pixels in x direction #define YWINDOWSIZE 800 // How many pixels in y direction #define DIM 3 // Are we in 2D or 3D #define DT 0.005 // Time step #define STOP_TIME 20.0 // How long to go //#define STOP_TIME 0.005 // Constants for Force Calculations #define SIGHT 10.0 // How far the fish can 'see' #define WA 2.0 // Attraction Weight Ratio #define WD 50.0 // Directional Weight Ratio #define CA 15.0 // Attraction Coefficient #define CR 60.0 // Repulsion Coefficient #define CTA 5000.0 // Attraction Coefficient (Target) //#define CPR 100000000.0 // Repulsion Coefficient (predator) // Constants for Food #define P 100 //Max Number of food particles #define FOODRAD 1.0 float foodrad[P]; // Food radius may change over time. #define FOODR 0.529412 // Red of target color #define FOODG 0.807843 // Green of target color #define FOODB 0.921569 // Blue of target color // Constants for GPU #define THREADSPERBLOCK 12 // Number of threads per block // Constants for Fishies #define N 12 // Number of fish #define FISHRAD 0.4 // Radius of fish spheres #define FISHR 1.0 // 1.0 Red of fish color #define FISHG 0.3 // 0.8 Green of fish color #define FISHB 0.3 // 0.5 Blue of fish color #define FISHTAIL 3.0 // How wide tail should be #define MINSPEED 0.1 // Slowest possible speed #define MAXSPEED 25.0 // Fastest allowable speed // Constants for Wall Calculations #define BOUNDARY EYEZ-1.0 // Walls #define BDIST 10.0 // Distance from boundary at which curving should start. #define WALLSTRENGTH MAXSPEED*100.0 // How strong our walls should be // Global Variables float4 p[N]; // positions for fish, float3 v[N]; // velocities for fish, float3 f[N]; // forces for fish, float4 p_food[P]; // positions for food float3 v_food[P], f_food[P]; // velocity and force on food int fishCounter[N]; // Counter to hold how many fish are within sight radius of each fish //Globals for GPU float4 *p_GPU; float3 *v_GPU, *f_GPU; float4 *p_food_GPU; float3 *v_food_GPU, *f_food_GPU; int *fishCounter_GPU; dim3 block, grid; double SPEED = 10.0; // Multiplier for speed - LOOK INTO THIS. 
IT SUCKS AND FUCK IT double TIMERUNNING = 0.0; // Stores how long the nBody code has been running int PAUSE = 0; // hold whether we are pausing simulatin int FOODPOINTER = 0; // where does the first target particle start at double STARTRAD = 48.0; //how big of a box do we want to start the fish in void initializeBodies() { // A function to initialize position, velocity, and force of fish, int i; // Initializing Fish for(i=0; i<N; i++) { // Start the fish at random positions p[i].x = (((float)rand()/(float)RAND_MAX)-0.5)*STARTRAD; p[i].y = (((float)rand()/(float)RAND_MAX)-0.5)*STARTRAD; if(DIM == 3) { p[i].z = (((float)rand()/(float)RAND_MAX)*20.0)-30.0; // z positions between -30 and -10 } else { p[i].z = -20.0; } // Set mass to 1 - can change later if needed, but this is fine for now p[i].w = 1.0; // Set starting velocity to 0 v[i].x = 0.0; v[i].y = 25.0; v[i].z = 0.0; // Set starting force to 0 f[i].x = 0.0; f[i].y = 0.0; f[i].z = 0.0; } // Initialize Targets for(i=0; i<P; i++) { p_food[i].x = 10000.0; p_food[i].y = 0.0; p_food[i].z = 0.0; p[i].w = 1.0; // Set mass to 1 // Starting radius foodrad[i] = FOODRAD; // Set starting velocity to 0 v_food[i].x = 0.0; v_food[i].y = 0.0; v_food[i].z = -2.0; /// How fast the food will sink after it's dropped } block.x = THREADSPERBLOCK; block.y = 1; block.z = 1; grid.x = (N-1)/block.x + 1; grid.y = 1; grid.z = 1; cudaMalloc( (void**)&p_GPU, N*sizeof(float4) ); cudaMalloc( (void**)&v_GPU, N*sizeof(float3) ); cudaMalloc( (void**)&f_GPU, N*sizeof(float3) ); cudaMalloc( (void**)&p_food_GPU, P*sizeof(float4) ); cudaMalloc( (void**)&v_food_GPU, P*sizeof(float3) ); cudaMalloc( (void**)&f_food_GPU, P*sizeof(float3) ); cudaMalloc( (void**)&fishCounter_GPU, N*sizeof(int) ); // Copy memory over to GPU for the first and only time cudaMemcpy(p_GPU, p, N*sizeof(float4), cudaMemcpyHostToDevice); cudaMemcpy(v_GPU, v, N*sizeof(float3), cudaMemcpyHostToDevice); cudaMemcpy(f_GPU, f, N*sizeof(float3), cudaMemcpyHostToDevice); cudaMemcpy(p_food_GPU, p_food, P*sizeof(float4), cudaMemcpyHostToDevice); cudaMemcpy(v_food_GPU, v_food, P*sizeof(float3), cudaMemcpyHostToDevice); cudaMemcpy(f_food_GPU, f_food, P*sizeof(float3), cudaMemcpyHostToDevice); cudaMemcpy(fishCounter_GPU, fishCounter, N*sizeof(int), cudaMemcpyHostToDevice); } void drawLines() { // Draw back lines glLineWidth(6.0); glColor3f(1.0, 1.0, 1.0); //glColor3f(0.9, 0.9, 0.9); glBegin(GL_LINES); glVertex3f(-50.0, -50.0, -40.0); glVertex3f( 50.0, -50.0, -40.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, -50.0, -40.0); glVertex3f( 50.0, 50.0, -40.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, 50.0, -40.0); glVertex3f(-50.0, 50.0, -40.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, 50.0, -40.0); glVertex3f(-50.0, -50.0, -40.0); glEnd(); // End lines // Draw side lines glLineWidth(10.0); //glColor3f(0.9, 0.9, 0.9); glColor3f(1.0, 1.0, 1.0); glBegin(GL_LINES); glVertex3f( 50.0, 50.0, -40.0); glVertex3f( 50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, -50.0, -40.0); glVertex3f( 50.0, -50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, 50.0, -40.0); glVertex3f(-50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, -50.0, -40.0); glVertex3f(-50.0, -50.0, - 0.0); glEnd(); // Draw front lines glLineWidth(5.0); glColor3f(1.0, 0.0, 0.0); glBegin(GL_LINES); glVertex3f(-50.0, -50.0, - 0.0); glVertex3f( 50.0, -50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, -50.0, - 0.0); glVertex3f( 50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f( 50.0, 50.0, - 0.0); 
glVertex3f(-50.0, 50.0, - 0.0); glEnd(); glBegin(GL_LINES); glVertex3f(-50.0, 50.0, - 0.0); glVertex3f(-50.0, -50.0, - 0.0); glEnd(); // End lines // End lines } void drawPicture() { glClear(GL_COLOR_BUFFER_BIT); glClear(GL_DEPTH_BUFFER_BIT); drawLines(); int i; // Drawing the fish for(i=0; i<N; i++) { float velMag = sqrt(v[i].x*v[i].x + v[i].y*v[i].y + v[i].z*v[i].z); glColor3d(FISHR, FISHG, FISHB); //Object color glLineWidth(FISHTAIL); // How wide should the tail line be glBegin(GL_LINES); glVertex3f(p[i].x, p[i].y, p[i].z); glVertex3f( p[i].x - (v[i].x/velMag), p[i].y - (v[i].y/velMag), p[i].z - (v[i].z/velMag) ); glEnd(); glPushMatrix(); glTranslatef(p[i].x, p[i].y, p[i].z); glutSolidSphere(FISHRAD, 10, 10); //First argument affects size. glPopMatrix(); } // Drawing the food for(i=0; i<P; i++) { //float velMag = sqrt(v_food[i].x*v[i].x + v[i].y*v[i].y + v[i].z*v[i].z); glColor3d(FOODR, FOODG, FOODB); //Object color glPushMatrix(); glTranslatef(p_food[i].x, p_food[i].y, p_food[i].z); glutSolidSphere(foodrad[i], 10, 10); //First argument affects size. glPopMatrix(); } glutSwapBuffers(); //makePicture("record1",1000,1000,1);//Call this after glutSwapBuffers. } void countFish(float4 *p) { for(int i = 0; i < N; i++) { fishCounter[i] = 0; for(int j=0; j < N; j++) { float3 d; d.x = p[j].x-p[i].x; d.y = p[j].y-p[i].y; d.z = p[j].z-p[i].z; float r2 = d.x*d.x + d.y*d.y + d.z*d.z; float r = sqrt(r2) + EPSILON; if(r < SIGHT) { fishCounter[i]++; } } //printf("There are %d fish in sight radius of fish %d\n", fishCounter[i], i); } } float3 getFishForces(float4 p0, float4 p1, float3 v1, int fishCounter) { // A function to calcultae the forces between float3 f, d; d.x = p1.x - p0.x; d.y = p1.y -p0.y; d.z = p1.z -p0.z; float r2 = d.x*d.x + d.y*d.y + d.z*d.z; float r = sqrt(r2) + EPSILON; float r4 = r2*r2 + EPSILON; //printf("There is a distance of %.2f between 2 fish\n", r); float vMag = sqrt(v1.x*v1.x + v1.y*v1.y + v1.z*v1.z); if(r < SIGHT) { f.x = WA*(CA*(d.x/r2) - CR*(d.x/r4)) + WD*(v1.x/(r*vMag))/(float)fishCounter; f.y = WA*(CA*(d.y/r2) - CR*(d.y/r4)) + WD*(v1.y/(r*vMag))/(float)fishCounter; f.z = WA*(CA*(d.z/r2) - CR*(d.z/r4)) + WD*(v1.z/(r*vMag))/(float)fishCounter; } else { f.x = 0.0; f.y = 0.0; f.z = 0.0; } return(f); } __device__ float3 getTargForces(float4 p0, float4 ptarg) { float3 f; float dx = ptarg.x - p0.x; float dy = ptarg.y - p0.y; float dz = ptarg.z - p0.z; float r2 = dx*dx + dy*dy + dz*dz + EPSILON; float r = sqrt(r2) + EPSILON; if(r < SIGHT*3.0) { f.x = CTA*dx/r2; f.y = CTA*dy/r2; f.z = CTA*dz/r2; } else { f.x = 0.0; f.y = 0.0; f.z = 0.0; } return(f); } __device__ float3 getWallForces(float4 p0, int dim) { float3 f; f.x = 0.0; f.y = 0.0; f.z = 0.0; float wallStart = BOUNDARY - BDIST; //BOUNDARY for x and y direction is 49, BDIST is 10 //wallStart is 39 in x and y direction // Right Wall if(p0.x > wallStart){f.x -= WALLSTRENGTH/(BOUNDARY - p0.x);} // Left Wall if(p0.x < -wallStart){f.x += WALLSTRENGTH/(BOUNDARY + p0.x);} // Top Wall if(p0.y > wallStart){f.y -= WALLSTRENGTH/(BOUNDARY - p0.y);} // Bottom Wall if(p0.y < -wallStart){f.y += WALLSTRENGTH/(BOUNDARY + p0.y);} if(dim == 3) { // Front Wall if(p0.z > -11.0){f.z -= WALLSTRENGTH/(-1.0 - p0.z);} // Back Wall if(p0.z < -29.0){f.z += WALLSTRENGTH/(40.0 + p0.z);} } return(f); } __device__ float3 getFishForcesDevice(float4 p0, float4 p1, float3 v1, int fishCounter) { float3 f; float dx = p1.x - p0.x; float dy = p1.y - p0.y; float dz = p1.z - p0.z; float r2 = dx*dx + dy*dy + dz*dz + EPSILON; float r = sqrt(r2) + EPSILON; float r4 = 
r2*r2 + EPSILON; // Check that we're within the sight radius if(r < SIGHT) { f.x = WA*(CA*(dx/r2) - CR*(dx/r4)) + WD*(v1.x/r)/(float)fishCounter; // In some versions, WD is divided by vMag. f.y = WA*(CA*(dy/r2) - CR*(dy/r4)) + WD*(v1.y/r)/(float)fishCounter; f.z = WA*(CA*(dz/r2) - CR*(dz/r4)) + WD*(v1.z/r)/(float)fishCounter; } else // If not, this particular fish does not affect the forces on p0 { f.x = 0.0; f.y = 0.0; f.z = 0.0; } return(f); } __device__ int getRadiusNumberDevice(float4 p0, float4 p1) { int fishCounter = 0; float dx = p1.x - p0.x; float dy = p1.y - p0.y; float dz = p1.z - p0.z; float r2 = dx*dx + dy*dy + dz*dz + EPSILON; float r = sqrt(r2) + EPSILON; if(r < SIGHT) { fishCounter ++; } return(fishCounter); } __global__ void getAllForcesKernel(float4 *p, float3 *v, float3 *f, int *fishCounter, float4 *p_food) { int i, j, ii; int id = threadIdx.x + blockDim.x*blockIdx.x; float3 forceSum0, forceMag, forceTarg, forceWall; float4 posMe; int cntMe; //int numberFishies; __shared__ float4 shPos[THREADSPERBLOCK]; __shared__ float3 shVel[THREADSPERBLOCK]; forceSum0.x = 0.0; forceSum0.y = 0.0; forceSum0.z = 0.0; posMe.x = p[id].x; posMe.y = p[id].y; posMe.z = p[id].z; posMe.w = p[id].w; cntMe = fishCounter[id]; for(j=0; j<gridDim.x; j++) { shPos[threadIdx.x] = p[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = v[threadIdx.x + blockDim.x*j]; //^^^Wyatt's code has blockDim.x, we have blockIdx.x __syncthreads(); #pragma unroll 32 for(i=0; i<blockDim.x; i++) { ii = i + blockDim.x*j; if(ii != id) { forceMag = getFishForcesDevice(posMe, shPos[i], shVel[i], cntMe); forceSum0.x += forceMag.x; forceSum0.y += forceMag.y; forceSum0.z += forceMag.z; } __syncthreads(); } __syncthreads(); } for(i=0; i<P; i++) { forceTarg = getTargForces(posMe, p_food[i]); forceSum0.x += forceTarg.x; forceSum0.y += forceTarg.y; forceSum0.z += forceTarg.z; __syncthreads(); } forceWall = getWallForces(posMe,DIM); forceSum0.x += forceWall.x; forceSum0.y += forceWall.y; forceSum0.z += forceWall.z; __syncthreads(); f[id].x = forceSum0.x; f[id].y = forceSum0.y; f[id].z = forceSum0.z; __syncthreads(); } void sinkFood(float4 *p, float3 *v, float3 *f, float4 *p_fish) { int i; for(i=0; i<P; i++) { // Update velocity and force. Currently not necessary. 
/* v[i].x += f[i].x*DT; v[i].y += f[i].y*DT; v[i].z += f[i].z*DT; float vMag = sqrt(v[i].x*v[i].x + v[i].y*v[i].y + v[i].z*v[i].z); if(vMag > MAXSPEED) { v[i].x *= (MAXSPEED/vMag); v[i].y *= (MAXSPEED/vMag); v[i].z *= (MAXSPEED/vMag); */ p[i].x += v[i].x*DT; p[i].y += v[i].y*DT; p[i].z += v[i].z*DT; // Iterate through fish to see which are eating for(int j = 0; j < N; j++) { float3 dist; dist.x = p_fish[j].x - p[i].x; dist.y = p_fish[j].y - p[i].y; dist.z = p_fish[j].z - p[i].z; float distance = sqrt(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z) + EPSILON; if(distance < 1.5) { printf("Fish %i is eating food %i\n", j, i); printf("Fish %d has a position of (%.2f, %.2f, %.2f)\n", j, p_fish[j].x, p[j].y, p[j].z); printf("Food %d has a position of (%.2f, %.2f, %.2f)\n", i, p[i].x, p_food[i].y, p_food[i].z); foodrad[i] /= 1.1; if(foodrad[i]<0.1) { p_food[i].x = 1000.0; } } } //printf("Food %d has a velocity of (%.2f, %.2f, %.2f)\n", i, v[i].x, v[i].y, v[i].z); //printf("Food %d has a position of (%.2f, %.2f, %.2f)\n", i, p[i].x, p[i].y, p[i].z); } } __global__ void swimFishKernel(float4 *p, float3 *v, float3 *f) { int id = threadIdx.x + blockDim.x*blockIdx.x; v[id].x += f[id].x*DT; v[id].y += f[id].y*DT; v[id].z += f[id].z*DT; float vMag = sqrt(v[id].x*v[id].x + v[id].y*v[id].y + v[id].z*v[id].z); if(vMag > MAXSPEED) { v[id].x *= (MAXSPEED/vMag); v[id].y *= (MAXSPEED/vMag); v[id].z *= (MAXSPEED/vMag); } //printf("Fish %d has a velocity of (%.2f, %.2f, %.2f)\n", id, v[id].x, v[id].y, v[id].z); p[id].x += v[id].x*DT; p[id].y += v[id].y*DT; p[id].z += v[id].z*DT; } void nBody() { cudaError_t err; countFish(p); // This is now done on the CPU instead of the GPU sinkFood(p_food, v_food, f_food, p); cudaMemcpy(fishCounter_GPU, fishCounter, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(p_food_GPU, p_food, P*sizeof(float4), cudaMemcpyHostToDevice); getAllForcesKernel<<<grid, block>>>(p_GPU, v_GPU, f_GPU, fishCounter_GPU, p_food_GPU); swimFishKernel<<< grid, block>>>(p_GPU, v_GPU, f_GPU); cudaMemcpy(p, p_GPU, N*sizeof(float4), cudaMemcpyDeviceToHost); cudaMemcpy(v, v_GPU, N*sizeof(float3), cudaMemcpyDeviceToHost); cudaMemcpy(f, f_GPU, N*sizeof(float3), cudaMemcpyDeviceToHost); for(int i=0; i<10; i++) { //printf("Fish %d has a force of (%.2f, %.2f, %.2f)\n", i, f[i].x, f[i].y, f[i].z); //printf("Fish %d has a velocity of (%.2f, %.2f, %.2f)\n", i, v[i].x, v[i].y, v[i].z); //printf("Fish %d has a position of (%.2f, %.2f, %.2f)\n", i, p[i].x, p[i].y, p[i].z); } err = cudaGetLastError(); if (err != 0) { printf("\n CUDA error = %s\n", cudaGetErrorString(err)); //return(1); } } void keyboardFunc( unsigned char key, int x, int y ) { int i,j; switch(key) { case 'q': exit(1); case ' ': j=0; for(i=0;i<N;i++) { if(abs((int)p[i].x) + abs((int)p[i].y) + abs((int)p[i].z) < EYEZ*3.0) { j++; } } printf("There are %i fish remaining\n", j); PAUSE++; if(PAUSE == 2) { PAUSE = 0; } } } void mouseFunc( int button, int state, int x, int y ) { double coord[3]; if( button == GLUT_LEFT_BUTTON ) { if( state == GLUT_DOWN && PAUSE == 0) // when left mouse button goes down. 
{ //printf("FOODPOINTER is %i \n", FOODPOINTER); coord[0] = (x*EYEZ*2.0/XWINDOWSIZE)-EYEZ; coord[1] = -(y*EYEZ*2.0/YWINDOWSIZE)+EYEZ; coord[2] = -1.0; printf("Food %i is at (%.4f, %.4f, %.4f)\n", FOODPOINTER, coord[0], coord[1], coord[2]); p_food[FOODPOINTER].x = coord[0]; p_food[FOODPOINTER].y = coord[1]; p_food[FOODPOINTER].z = coord[2]; foodrad[FOODPOINTER] = FOODRAD; //Change pointer to next food particle FOODPOINTER++; if(FOODPOINTER == P) { FOODPOINTER = 0; } } } } void update(int value) { if(TIMERUNNING < STOP_TIME){ if(PAUSE == 0) { nBody(); } } glutKeyboardFunc( keyboardFunc ); glutMouseFunc( mouseFunc ); glutPostRedisplay(); glutTimerFunc(1, update, 0); TIMERUNNING += DT; } void Display(void) { glClear(GL_COLOR_BUFFER_BIT); glClear(GL_DEPTH_BUFFER_BIT); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); drawPicture(); //glutSwapBuffers(); glFlush(); } void reshape(int w, int h) { glViewport(0, 0, (GLsizei) w, (GLsizei) h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, 150.0); glMatrixMode(GL_MODELVIEW); } int main(int argc, char** argv) { glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB); glutInitWindowSize(XWINDOWSIZE,YWINDOWSIZE); glutInitWindowPosition(0,0); glutCreateWindow(""); GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0}; GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0}; GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0}; GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0}; GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0}; GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0}; GLfloat mat_shininess[] = {50.0}; //glClearColor(0.8, 0.8, 1.0, 0.0); Light blue background glClearColor(0.0, 0.0, 0.2, 0.0); // Dark blue background glShadeModel(GL_SMOOTH); glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE); glLightfv(GL_LIGHT0, GL_POSITION, light_position); glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient); glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse); glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular); glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient); glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular); glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glEnable(GL_COLOR_MATERIAL); glEnable(GL_DEPTH_TEST); initializeBodies(); gluLookAt(0.0, 0.0, EYEZ, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0); glutDisplayFunc(Display); glutTimerFunc(16, update, 0); glutReshapeFunc(reshape); glutMainLoop(); return 0; }
b3b096d5d00bc2a2e42654ee6f026b5febb0eb90.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cmath> #define D 1 //Please set it to d in texture_main.m manually #define NG 32 //Please set it to Ng in texture_main.m manually #define DEFAULT 0 //Please set it to default in texture_main.m manually #define WINDOWDIM 9 //Please set it to WindowDim in texture_main.m manually #define BlOCKDIM 16 //Please set it to BlockDim in texture_main.m manually #define EPS 1e-8 __device__ int calInd(int Dim, int y, int x) { return y*Dim + x; } __device__ float calAvgOrRange(float *X, int n, int bool_mean) { float ans = 0.0; if (bool_mean == 1) { for (int i = 0; i < n; i++) ans += X[i]; } else { float max = X[0]; float min = max; for (int i = 0; i < n; i++) { if (X[i] > max) max = X[i]; if (X[i] < min) min = X[i]; } ans = max - min; } return ans; } __device__ float calPx(unsigned char *SDM, float sum, int i) { float px = 0.0; for (int k = 0; k < NG; k++) { px += (float)SDM[calInd(NG, k, i)] / sum; } return px; } __device__ float calPy(unsigned char *SDM, float sum, int j) { float py = 0.0; for (int k = 0; k < NG; k++) { py += (float)SDM[calInd(NG, j, k)] / sum; } return py; } __device__ float calPx_plus_y(unsigned char *SDM, float sum, int k) //k is the same as the paper listed { k = k - 2; //k = 2, 3, ..., 2NG float pxy = 0.0; int i, j; int lowerlimit, upperlimit; if (k < NG) { lowerlimit = 0; upperlimit = k + 1; } else { lowerlimit = k - NG + 1; upperlimit = NG; } for (j = lowerlimit; j < upperlimit; j++) { i = k - j; pxy += (float)SDM[calInd(NG, j, i)] / sum; } return pxy; } __device__ float calPx_minus_y(unsigned char *SDM, float sum, int k) { float pxy = 0.0; int i, j; int lowerlimit, upperlimit; lowerlimit = 0; upperlimit = NG - k; for (j = lowerlimit; j < upperlimit; j++) { i = j + k; pxy += (float)SDM[calInd(NG, j, i)] / sum; } lowerlimit = k; upperlimit = NG; for (j = lowerlimit; j < upperlimit; j++) { i = j - k; pxy += (float)SDM[calInd(NG, j, i)] / sum; } return pxy; } __device__ float2 cal_mu_std_x(unsigned char *SDM, float sum, int flag) //flag = 0 only calculate mean, flag = 1 calculate mean and std { float px[NG] = { 0.0 }; float2 ans; //ans.x is mean, ans.y is standard deviation ans.x = 0; ans.y = 0; for (int i = 0; i < NG; i++) { px[i] = calPx(SDM, sum, i); } for (int i = 0; i < NG; i++) ans.x += px[i]; ans.x = ans.x / NG; if (flag == 1) { for (int i = 0; i < NG; i++) { ans.y += (px[i] - ans.x)* (px[i] - ans.x); } ans.y = sqrt(ans.y / NG); } return ans; } __device__ float2 cal_mu_std_y(unsigned char *SDM, float sum, int flag) //flag = 0 only calculate mean, flag = 1 calculate mean and std { float py[NG] = { 0.0 }; float2 ans; //ans.x is mean, ans.y is standard deviation ans.x = 0; ans.y = 0; for (int j = 0; j < NG; j++) { py[j] = calPy(SDM, sum, j); } for (int j = 0; j < NG; j++) ans.x += py[j]; ans.x = ans.x / NG; if (flag == 1) { for (int j = 0; j < NG; j++) { ans.y += (py[j] - ans.x) * (py[j] - ans.x); } ans.y = sqrt(ans.y / NG); } return ans; } __device__ float calmu(unsigned char *SDM, float sum) { float px; float mu = 0.0; for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { px = calPx(SDM, sum, i); mu += px*i; } } return mu; } __device__ float calHXY(unsigned char *SDM, float sum) { float HXY = 0.0; float p; for (int i = 0; i < NG*NG; i++) { p = (float)SDM[i] / sum; HXY -= p*log(p + EPS); } return HXY; } __device__ float calHXY1(unsigned char *SDM, float sum) { float HXY1 = 0.0; float p; float px[NG]; float py[NG]; for (int i = 0; i < 
NG; i++) px[i] = calPx(SDM, sum, i); for (int j = 0; j < NG; j++) py[j] = calPy(SDM, sum, j); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = (float)SDM[calInd(NG, j, i)] / sum; HXY1 -= p*log(px[i] * py[j] + EPS); } } return HXY1; } __device__ float calHXY2(unsigned char *SDM, float sum) { float HXY2 = 0.0; float p; float px[NG]; float py[NG]; for (int i = 0; i < NG; i++) px[i] = calPx(SDM, sum, i); for (int j = 0; j < NG; j++) py[j] = calPy(SDM, sum, j); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = px[i] * py[j]; HXY2 -= p*log(p + EPS); } } return HXY2; } __device__ float calHX(unsigned char *SDM, float sum) { float HX = 0.0; float p; for (int i = 0; i < NG; i++) { p = calPx(SDM, sum, i); HX -= p*log(p + EPS); } return HX; } __device__ float calHY(unsigned char *SDM, float sum) { float HY = 0.0; float p; for (int j = 0; j < NG; j++) { p = calPy(SDM, sum, j); HY -= p*log(p + EPS); } return HY; } __device__ float calTexture(unsigned char *SDM, int method) { float texture = 0.0; float sum = 0.0; float p = 0.0; for (int i = 0; i < NG*NG; i++) sum += (float)SDM[i]; if (method == 0) { texture = sum; } else if (method == 1) //Angular Second Moment { if (sum == 0) texture = 0; else { for (int i = 0; i < NG*NG; i++) { p = (float)SDM[i] / sum; texture += p*p; } } } else if (method == 2) //Contrast { if (sum == 0) texture = 0; else { for (int n = 0; n < NG; n++) { texture += n*n*calPx_minus_y(SDM, sum, n); } } } else if (method == 3) //Correlation { if (sum == 0) texture = 0; else { float2 mustd_x = cal_mu_std_x(SDM, sum, 1); float2 mustd_y = cal_mu_std_y(SDM, sum, 1); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p += i*j*(float)SDM[calInd(NG, j, i)] / sum; } } texture = (p - mustd_x.x * mustd_y.x) / (mustd_x.y * mustd_y.y); } } else if (method == 4) //Sum of Squares: Variance { if (sum == 0) texture = 0; else { float mu = calmu(SDM, sum); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = (float)SDM[calInd(NG, j, i)] / sum; texture += (i - mu)*(i - mu)*p; } } } } else if (method == 5) //Inverse Difference Moment { if (sum == 0) texture = 0; else { for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = (float)SDM[calInd(NG, j, i)] / sum; texture += p / (1 + (i - j)*(i - j)); } } } } else if (method == 6) //Sum Average { if (sum == 0) texture = 0; else { for (int k = 2; k <= 2 * NG; k++) { texture += k*calPx_plus_y(SDM, sum, k); } } } else if (method == 7) //Sum Variance { if (sum == 0) texture = 0; else { float pxy[2 * NG - 1]; float f8 = 0.0; for (int k = 2; k <= 2 * NG; k++) { p = calPx_plus_y(SDM, sum, k); pxy[k - 2] = p; f8 -= p*log(p + EPS); } for (int k = 2; k <= 2 * NG; k++) { texture += (k - f8)*(k - f8)*pxy[k - 2]; } } } else if (method == 8) //Sum Entropy { if (sum == 0) texture = 0; else { for (int k = 2; k <= 2 * NG; k++) { p = calPx_plus_y(SDM, sum, k); texture -= p*log(p + EPS); } } } else if (method == 9) //Entropy { if (sum == 0) texture = 0; else { texture = calHXY(SDM, sum); } } else if (method == 10) //Difference Variance { if (sum == 0) texture = 0; else { float pxy[NG]; float mean = 0.0; for (int k = 0; k < NG; k++) { pxy[k] = calPx_minus_y(SDM, sum, k); mean += pxy[k]; } mean = mean / NG; for (int k = 0; k < NG; k++) { texture += (pxy[k] - mean)*(pxy[k] - mean); } texture = texture / NG; } } else if (method == 11) //Difference Entropy { if (sum == 0) texture = 0; else { for (int k = 0; k < NG; k++) { p = calPx_minus_y(SDM, sum, k); texture -= p*log(p + EPS); } } } else if (method == 12) 
//Information Mesures of Correlation { if (sum == 0) texture = 0; else { float HX = calHX(SDM, sum); float HY = calHY(SDM, sum); float H; if (HX >= HY) H = HX; else H = HY; texture = (calHXY(SDM, sum) - calHXY1(SDM, sum)) / H; } } else if (method == 13) //Information Mesures of Correlation { if (sum == 0) texture = 0; else { texture = 1 - exp(-2.0*(calHXY2(SDM, sum) - calHXY(SDM, sum))); if (texture < 0) texture = 0; else texture = sqrt(texture); } } else if (method == 14) //Maximal Correlation Coefficient { /* if (sum == 0) texture = 0; else { float Q[NG*NG]; float q, pik, pjk, pxi, pyk; for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { pxi = calPx(SDM, sum, i); for (int k = 0; k < NG; k++) { pik= (float)SDM[calInd(NG, k, i)] / sum; pjk = (float)SDM[calInd(NG, k, j)] / sum; pyk = calPy(SDM, sum, k); q += (pik*pjk) / (pxi*pyk + EPS); } Q[calInd(NG, j, i)] = q; } } //Next are solving Q.TQ second largest eigenvalue, supposed to use QR decomposing. //It hardly available for a single thread in GPU. } */ } return texture; } __device__ void updateSDM(unsigned char *SDM, int value1, int value2) { SDM[calInd(NG, value1, value2)] += 1; } __device__ int convert2scale(float value, float *Q) { int rank = 0; while (Q[rank] < value && rank < NG - 1) rank += 1; return rank; } __device__ void copyAndConvertImage(float *SplitImage, int SIDim, int *SubSplitImage, int SSIDim, float *Q, int x, int y, int ix, int iy) { SubSplitImage[calInd(SSIDim, iy, ix)] = convert2scale(SplitImage[calInd(SIDim, x, y)], Q); if (ix < WINDOWDIM - 1) SubSplitImage[calInd(SSIDim, iy, ix + BlOCKDIM)] = convert2scale(SplitImage[calInd(SIDim, x + BlOCKDIM, y)], Q); if (iy < WINDOWDIM - 1) SubSplitImage[calInd(SSIDim, iy + BlOCKDIM, ix)] = convert2scale(SplitImage[calInd(SIDim, x, y + BlOCKDIM)], Q); if (ix < WINDOWDIM - 1 && iy < WINDOWDIM - 1) SubSplitImage[calInd(SSIDim, iy + BlOCKDIM, ix + BlOCKDIM)] = convert2scale(SplitImage[calInd(SIDim, x + BlOCKDIM, y + BlOCKDIM)], Q); __syncthreads(); } __global__ void gpuCalculateTexture(float *Texture, int TextureDim, float *SplitImage, int SIDim, float *Q, int method, int bool_mean) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int ix = threadIdx.x; int iy = threadIdx.y; if (x >= TextureDim || y >= TextureDim) return; __shared__ int SubSplitImage[(BlOCKDIM + WINDOWDIM - 1)*(BlOCKDIM + WINDOWDIM - 1)]; int SSIDim = BlOCKDIM + WINDOWDIM - 1; copyAndConvertImage(SplitImage, SIDim, SubSplitImage, SSIDim, Q, x, y, ix, iy); if (SplitImage[calInd(SIDim, x + (WINDOWDIM - 1) / 2, y + (WINDOWDIM - 1) / 2)] == DEFAULT) Texture[calInd(TextureDim, x, y)] = DEFAULT; else { int jstart, jstop, istart, istop, xshift, yshift; int value1, value2; float texture[4] = { 0.0 }; for (int t = 0; t < 4; t++) { //X=D, Y=0 shift if (t == 0) { xshift = D; yshift = 0; jstart = iy; jstop = WINDOWDIM + iy; istart = ix; istop = WINDOWDIM - D + ix; } //X=D, Y=-D shift if (t == 1) { xshift = D; yshift = -1 * D; jstart = D + iy; jstop = WINDOWDIM + iy; istart = ix; istop = WINDOWDIM - D + ix; } //X=0, Y=D shift if (t == 2) { xshift = 0; yshift = D; jstart = iy; jstop = WINDOWDIM - D + iy; istart = ix; istop = WINDOWDIM + ix; } //X=D, Y=D shift if (t == 3) { xshift = D; yshift = D; jstart = iy; jstop = WINDOWDIM - D + iy; istart = ix; istop = WINDOWDIM - D + ix; } unsigned char SDM[NG*NG] = { 0 }; for (int j = jstart; j < jstop; j++) { for (int i = istart; i < istop; i++) { value1 = SubSplitImage[calInd(SSIDim, j, i)]; value2 = SubSplitImage[calInd(SSIDim, j + 
yshift, i + xshift)]; updateSDM(SDM, value1, value2); updateSDM(SDM, value2, value1); } } texture[t] = calTexture(SDM, method); } Texture[calInd(TextureDim, x, y)] = calAvgOrRange(texture, 4, bool_mean); } __syncthreads(); }
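Per output pixel, the kernel above quantises a WINDOWDIM-wide neighbourhood to NG grey levels, accumulates a symmetric co-occurrence matrix (the SDM) for four displacements, reduces each matrix to one Haralick feature, and finally averages or ranges the four values. The host-side sketch below reproduces that bookkeeping for a hand-checkable case: a 4x4 image, two grey levels and only the (dx,dy) = (1,0) displacement; buildGLCM(), asm_feature() and NGRAY are our own illustrative names, not part of the kernel.

// Hypothetical host-side illustration of the co-occurrence bookkeeping done by
// updateSDM()/calTexture(): build a symmetric GLCM for one displacement and
// compute the Angular Second Moment (method == 1 above).
#include <cstdio>

const int NGRAY = 2;   // stand-in for NG, small enough to inspect by hand
const int W = 4, H = 4;

void buildGLCM(const int img[H][W], int dx, int dy, int glcm[NGRAY][NGRAY])
{
    for (int y = 0; y < H; ++y)
        for (int x = 0; x < W; ++x) {
            int nx = x + dx, ny = y + dy;
            if (nx < 0 || nx >= W || ny < 0 || ny >= H) continue;
            int a = img[y][x], b = img[ny][nx];
            ++glcm[a][b];   // same idea as updateSDM(SDM, value1, value2)
            ++glcm[b][a];   // ...and the mirrored update that makes the SDM symmetric
        }
}

float asm_feature(int glcm[NGRAY][NGRAY])
{
    float sum = 0.f, f = 0.f;
    for (int j = 0; j < NGRAY; ++j)
        for (int i = 0; i < NGRAY; ++i) sum += glcm[j][i];
    if (sum == 0.f) return 0.f;
    for (int j = 0; j < NGRAY; ++j)
        for (int i = 0; i < NGRAY; ++i) {
            float p = glcm[j][i] / sum;
            f += p * p;     // Angular Second Moment: sum over i,j of p(i,j)^2
        }
    return f;
}

int main()
{
    const int img[H][W] = { {0,0,1,1}, {0,0,1,1}, {0,1,1,1}, {0,1,1,1} };
    int glcm[NGRAY][NGRAY] = {};
    buildGLCM(img, 1, 0, glcm);   // the kernel's "X=D, Y=0 shift" with D = 1
    printf("GLCM: [%d %d; %d %d]  ASM = %.3f\n",
           glcm[0][0], glcm[0][1], glcm[1][0], glcm[1][1], asm_feature(glcm));
    return 0;
}

For this image the symmetric matrix comes out as [4 4; 4 12] over 24 counted pairs and the Angular Second Moment is 1/3, matching what calTexture() returns for method == 1 given the same counts.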
b3b096d5d00bc2a2e42654ee6f026b5febb0eb90.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cmath> #define D 1 //Please set it to d in texture_main.m manually #define NG 32 //Please set it to Ng in texture_main.m manually #define DEFAULT 0 //Please set it to default in texture_main.m manually #define WINDOWDIM 9 //Please set it to WindowDim in texture_main.m manually #define BlOCKDIM 16 //Please set it to BlockDim in texture_main.m manually #define EPS 1e-8 __device__ int calInd(int Dim, int y, int x) { return y*Dim + x; } __device__ float calAvgOrRange(float *X, int n, int bool_mean) { float ans = 0.0; if (bool_mean == 1) { for (int i = 0; i < n; i++) ans += X[i]; } else { float max = X[0]; float min = max; for (int i = 0; i < n; i++) { if (X[i] > max) max = X[i]; if (X[i] < min) min = X[i]; } ans = max - min; } return ans; } __device__ float calPx(unsigned char *SDM, float sum, int i) { float px = 0.0; for (int k = 0; k < NG; k++) { px += (float)SDM[calInd(NG, k, i)] / sum; } return px; } __device__ float calPy(unsigned char *SDM, float sum, int j) { float py = 0.0; for (int k = 0; k < NG; k++) { py += (float)SDM[calInd(NG, j, k)] / sum; } return py; } __device__ float calPx_plus_y(unsigned char *SDM, float sum, int k) //k is the same as the paper listed { k = k - 2; //k = 2, 3, ..., 2NG float pxy = 0.0; int i, j; int lowerlimit, upperlimit; if (k < NG) { lowerlimit = 0; upperlimit = k + 1; } else { lowerlimit = k - NG + 1; upperlimit = NG; } for (j = lowerlimit; j < upperlimit; j++) { i = k - j; pxy += (float)SDM[calInd(NG, j, i)] / sum; } return pxy; } __device__ float calPx_minus_y(unsigned char *SDM, float sum, int k) { float pxy = 0.0; int i, j; int lowerlimit, upperlimit; lowerlimit = 0; upperlimit = NG - k; for (j = lowerlimit; j < upperlimit; j++) { i = j + k; pxy += (float)SDM[calInd(NG, j, i)] / sum; } lowerlimit = k; upperlimit = NG; for (j = lowerlimit; j < upperlimit; j++) { i = j - k; pxy += (float)SDM[calInd(NG, j, i)] / sum; } return pxy; } __device__ float2 cal_mu_std_x(unsigned char *SDM, float sum, int flag) //flag = 0 only calculate mean, flag = 1 calculate mean and std { float px[NG] = { 0.0 }; float2 ans; //ans.x is mean, ans.y is standard deviation ans.x = 0; ans.y = 0; for (int i = 0; i < NG; i++) { px[i] = calPx(SDM, sum, i); } for (int i = 0; i < NG; i++) ans.x += px[i]; ans.x = ans.x / NG; if (flag == 1) { for (int i = 0; i < NG; i++) { ans.y += (px[i] - ans.x)* (px[i] - ans.x); } ans.y = sqrt(ans.y / NG); } return ans; } __device__ float2 cal_mu_std_y(unsigned char *SDM, float sum, int flag) //flag = 0 only calculate mean, flag = 1 calculate mean and std { float py[NG] = { 0.0 }; float2 ans; //ans.x is mean, ans.y is standard deviation ans.x = 0; ans.y = 0; for (int j = 0; j < NG; j++) { py[j] = calPy(SDM, sum, j); } for (int j = 0; j < NG; j++) ans.x += py[j]; ans.x = ans.x / NG; if (flag == 1) { for (int j = 0; j < NG; j++) { ans.y += (py[j] - ans.x) * (py[j] - ans.x); } ans.y = sqrt(ans.y / NG); } return ans; } __device__ float calmu(unsigned char *SDM, float sum) { float px; float mu = 0.0; for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { px = calPx(SDM, sum, i); mu += px*i; } } return mu; } __device__ float calHXY(unsigned char *SDM, float sum) { float HXY = 0.0; float p; for (int i = 0; i < NG*NG; i++) { p = (float)SDM[i] / sum; HXY -= p*log(p + EPS); } return HXY; } __device__ float calHXY1(unsigned char *SDM, float sum) { float HXY1 = 0.0; float p; float px[NG]; float py[NG]; for (int i = 0; i < NG; i++) px[i] = calPx(SDM, sum, i); for (int j = 0; j < NG; 
j++) py[j] = calPy(SDM, sum, j); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = (float)SDM[calInd(NG, j, i)] / sum; HXY1 -= p*log(px[i] * py[j] + EPS); } } return HXY1; } __device__ float calHXY2(unsigned char *SDM, float sum) { float HXY2 = 0.0; float p; float px[NG]; float py[NG]; for (int i = 0; i < NG; i++) px[i] = calPx(SDM, sum, i); for (int j = 0; j < NG; j++) py[j] = calPy(SDM, sum, j); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = px[i] * py[j]; HXY2 -= p*log(p + EPS); } } return HXY2; } __device__ float calHX(unsigned char *SDM, float sum) { float HX = 0.0; float p; for (int i = 0; i < NG; i++) { p = calPx(SDM, sum, i); HX -= p*log(p + EPS); } return HX; } __device__ float calHY(unsigned char *SDM, float sum) { float HY = 0.0; float p; for (int j = 0; j < NG; j++) { p = calPy(SDM, sum, j); HY -= p*log(p + EPS); } return HY; } __device__ float calTexture(unsigned char *SDM, int method) { float texture = 0.0; float sum = 0.0; float p = 0.0; for (int i = 0; i < NG*NG; i++) sum += (float)SDM[i]; if (method == 0) { texture = sum; } else if (method == 1) //Angular Second Moment { if (sum == 0) texture = 0; else { for (int i = 0; i < NG*NG; i++) { p = (float)SDM[i] / sum; texture += p*p; } } } else if (method == 2) //Contrast { if (sum == 0) texture = 0; else { for (int n = 0; n < NG; n++) { texture += n*n*calPx_minus_y(SDM, sum, n); } } } else if (method == 3) //Correlation { if (sum == 0) texture = 0; else { float2 mustd_x = cal_mu_std_x(SDM, sum, 1); float2 mustd_y = cal_mu_std_y(SDM, sum, 1); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p += i*j*(float)SDM[calInd(NG, j, i)] / sum; } } texture = (p - mustd_x.x * mustd_y.x) / (mustd_x.y * mustd_y.y); } } else if (method == 4) //Sum of Squares: Variance { if (sum == 0) texture = 0; else { float mu = calmu(SDM, sum); for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = (float)SDM[calInd(NG, j, i)] / sum; texture += (i - mu)*(i - mu)*p; } } } } else if (method == 5) //Inverse Difference Moment { if (sum == 0) texture = 0; else { for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { p = (float)SDM[calInd(NG, j, i)] / sum; texture += p / (1 + (i - j)*(i - j)); } } } } else if (method == 6) //Sum Average { if (sum == 0) texture = 0; else { for (int k = 2; k <= 2 * NG; k++) { texture += k*calPx_plus_y(SDM, sum, k); } } } else if (method == 7) //Sum Variance { if (sum == 0) texture = 0; else { float pxy[2 * NG - 1]; float f8 = 0.0; for (int k = 2; k <= 2 * NG; k++) { p = calPx_plus_y(SDM, sum, k); pxy[k - 2] = p; f8 -= p*log(p + EPS); } for (int k = 2; k <= 2 * NG; k++) { texture += (k - f8)*(k - f8)*pxy[k - 2]; } } } else if (method == 8) //Sum Entropy { if (sum == 0) texture = 0; else { for (int k = 2; k <= 2 * NG; k++) { p = calPx_plus_y(SDM, sum, k); texture -= p*log(p + EPS); } } } else if (method == 9) //Entropy { if (sum == 0) texture = 0; else { texture = calHXY(SDM, sum); } } else if (method == 10) //Difference Variance { if (sum == 0) texture = 0; else { float pxy[NG]; float mean = 0.0; for (int k = 0; k < NG; k++) { pxy[k] = calPx_minus_y(SDM, sum, k); mean += pxy[k]; } mean = mean / NG; for (int k = 0; k < NG; k++) { texture += (pxy[k] - mean)*(pxy[k] - mean); } texture = texture / NG; } } else if (method == 11) //Difference Entropy { if (sum == 0) texture = 0; else { for (int k = 0; k < NG; k++) { p = calPx_minus_y(SDM, sum, k); texture -= p*log(p + EPS); } } } else if (method == 12) //Information Mesures of Correlation { if (sum == 0) texture = 0; else 
{ float HX = calHX(SDM, sum); float HY = calHY(SDM, sum); float H; if (HX >= HY) H = HX; else H = HY; texture = (calHXY(SDM, sum) - calHXY1(SDM, sum)) / H; } } else if (method == 13) //Information Mesures of Correlation { if (sum == 0) texture = 0; else { texture = 1 - exp(-2.0*(calHXY2(SDM, sum) - calHXY(SDM, sum))); if (texture < 0) texture = 0; else texture = sqrt(texture); } } else if (method == 14) //Maximal Correlation Coefficient { /* if (sum == 0) texture = 0; else { float Q[NG*NG]; float q, pik, pjk, pxi, pyk; for (int j = 0; j < NG; j++) { for (int i = 0; i < NG; i++) { pxi = calPx(SDM, sum, i); for (int k = 0; k < NG; k++) { pik= (float)SDM[calInd(NG, k, i)] / sum; pjk = (float)SDM[calInd(NG, k, j)] / sum; pyk = calPy(SDM, sum, k); q += (pik*pjk) / (pxi*pyk + EPS); } Q[calInd(NG, j, i)] = q; } } //Next are solving Q.TQ second largest eigenvalue, supposed to use QR decomposing. //It hardly available for a single thread in GPU. } */ } return texture; } __device__ void updateSDM(unsigned char *SDM, int value1, int value2) { SDM[calInd(NG, value1, value2)] += 1; } __device__ int convert2scale(float value, float *Q) { int rank = 0; while (Q[rank] < value && rank < NG - 1) rank += 1; return rank; } __device__ void copyAndConvertImage(float *SplitImage, int SIDim, int *SubSplitImage, int SSIDim, float *Q, int x, int y, int ix, int iy) { SubSplitImage[calInd(SSIDim, iy, ix)] = convert2scale(SplitImage[calInd(SIDim, x, y)], Q); if (ix < WINDOWDIM - 1) SubSplitImage[calInd(SSIDim, iy, ix + BlOCKDIM)] = convert2scale(SplitImage[calInd(SIDim, x + BlOCKDIM, y)], Q); if (iy < WINDOWDIM - 1) SubSplitImage[calInd(SSIDim, iy + BlOCKDIM, ix)] = convert2scale(SplitImage[calInd(SIDim, x, y + BlOCKDIM)], Q); if (ix < WINDOWDIM - 1 && iy < WINDOWDIM - 1) SubSplitImage[calInd(SSIDim, iy + BlOCKDIM, ix + BlOCKDIM)] = convert2scale(SplitImage[calInd(SIDim, x + BlOCKDIM, y + BlOCKDIM)], Q); __syncthreads(); } __global__ void gpuCalculateTexture(float *Texture, int TextureDim, float *SplitImage, int SIDim, float *Q, int method, int bool_mean) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int ix = threadIdx.x; int iy = threadIdx.y; if (x >= TextureDim || y >= TextureDim) return; __shared__ int SubSplitImage[(BlOCKDIM + WINDOWDIM - 1)*(BlOCKDIM + WINDOWDIM - 1)]; int SSIDim = BlOCKDIM + WINDOWDIM - 1; copyAndConvertImage(SplitImage, SIDim, SubSplitImage, SSIDim, Q, x, y, ix, iy); if (SplitImage[calInd(SIDim, x + (WINDOWDIM - 1) / 2, y + (WINDOWDIM - 1) / 2)] == DEFAULT) Texture[calInd(TextureDim, x, y)] = DEFAULT; else { int jstart, jstop, istart, istop, xshift, yshift; int value1, value2; float texture[4] = { 0.0 }; for (int t = 0; t < 4; t++) { //X=D, Y=0 shift if (t == 0) { xshift = D; yshift = 0; jstart = iy; jstop = WINDOWDIM + iy; istart = ix; istop = WINDOWDIM - D + ix; } //X=D, Y=-D shift if (t == 1) { xshift = D; yshift = -1 * D; jstart = D + iy; jstop = WINDOWDIM + iy; istart = ix; istop = WINDOWDIM - D + ix; } //X=0, Y=D shift if (t == 2) { xshift = 0; yshift = D; jstart = iy; jstop = WINDOWDIM - D + iy; istart = ix; istop = WINDOWDIM + ix; } //X=D, Y=D shift if (t == 3) { xshift = D; yshift = D; jstart = iy; jstop = WINDOWDIM - D + iy; istart = ix; istop = WINDOWDIM - D + ix; } unsigned char SDM[NG*NG] = { 0 }; for (int j = jstart; j < jstop; j++) { for (int i = istart; i < istop; i++) { value1 = SubSplitImage[calInd(SSIDim, j, i)]; value2 = SubSplitImage[calInd(SSIDim, j + yshift, i + xshift)]; updateSDM(SDM, value1, value2); updateSDM(SDM, value2, 
value1); } } texture[t] = calTexture(SDM, method); } Texture[calInd(TextureDim, x, y)] = calAvgOrRange(texture, 4, bool_mean); } __syncthreads(); }
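gpuCalculateTexture() expects one thread per output pixel and a BlOCKDIM x BlOCKDIM thread block, since copyAndConvertImage() staggers its shared-memory loads by BlOCKDIM, and the early return sits before a __syncthreads(), so partial blocks would be unsafe. The launch sketch below is our reading of that indexing, not something taken from the texture_main.m driver: it assumes the wrapper lives in the same .cu file (so the kernel and the #defines are in scope), that the padded input satisfies siDim = textureDim + WINDOWDIM - 1, and that textureDim is a multiple of BlOCKDIM.

// Hypothetical launch wrapper for gpuCalculateTexture(); all size relations
// here are assumptions stated in the paragraph above.
#include <cuda_runtime.h>

void runTexture(float* dTexture, int textureDim,   // device output, textureDim x textureDim
                float* dSplitImage, int siDim,     // device input, siDim = textureDim + WINDOWDIM - 1
                float* dQ,                         // device quantisation thresholds (NG of them)
                int method, int boolMean)
{
    dim3 block(BlOCKDIM, BlOCKDIM);                // must match BlOCKDIM for the shared-memory apron loads
    dim3 grid(textureDim / BlOCKDIM,               // textureDim assumed to be a multiple of BlOCKDIM
              textureDim / BlOCKDIM);
    gpuCalculateTexture<<<grid, block>>>(dTexture, textureDim, dSplitImage, siDim,
                                         dQ, method, boolMean);
    cudaDeviceSynchronize();                       // surface any launch/runtime error before reading results
}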
5e0618a7ea902da29955d6f8cf92208ea7b385ae.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include "maskRCNNKernels.h" #include "plugin.h" #include <NvInfer.h> #include <assert.h> #include <hipcub/hipcub.hpp> #include <iostream> #include <stdio.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #define DUBUG_KERNEL 0 #define DUBUG_BATCH 0 #define DEBUG_T 1 #define dMIN(a, b) ((a) < (b) ? (a) : (b)) #define dMAX(a, b) ((a) > (b) ? (a) : (b)) #define dCLAMP(x, xMin, xMax) ((x) > (xMin) ? ((x) < (xMax) ? (x) : (xMax)) : (xMin)) template <typename BoxType> struct BBoxT { BoxType y1, x1, y2, x2; }; template <typename DType> __global__ void argMaxReset_kernel( int samples, int NClass, const DType* in_scores, const int* maxIdx, DType* out_scores) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int max_idx = samples * NClass; if (idx >= max_idx) return; int sampleIdx = idx / NClass; int classIdx = idx % NClass; if (classIdx != maxIdx[sampleIdx]) out_scores[idx] = 0; else out_scores[idx] = in_scores[idx]; } template <typename DType> struct ScanItem { DType data; int idx; }; template <typename DType> struct GreaterItem { __host__ __device__ __forceinline__ ScanItem<DType> operator()( const ScanItem<DType>& a, const ScanItem<DType>& b) const { return (a.data > b.data ? 
a : b); } }; template <typename DType> __global__ void resetMemValue_kernel(void* outPtr, int samples, float val) { DType* out = static_cast<DType*>(outPtr); int loop = gridDim.x * blockDim.x; for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < samples; idx += loop) { out[idx] = (DType) val; } } // blockDim.x : NClass // GroupDim.x : sample count // GroupDim.y : batch N // outScore : DType[ N * sample * 1 ] // outLabel : int[ N * sample * 1 ] // outBbox : int[ N * sample * 4 ] template <typename DType, typename BoxType, int Threads = 32> __global__ void argMaxGroup_kernel(int samples, int start_class_id, int NClass, const void* inScorePtr, const void* inBboxPtr, const void* validSampleCountPtr, void* outScorePtr, void* outLabelPtr, void* outBboxPtr) { const DType* inScore = static_cast<const DType*>(inScorePtr); const BoxType* inBbox = static_cast<const BoxType*>(inBboxPtr); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); DType* outScore = static_cast<DType*>(outScorePtr); BoxType* outLabel = static_cast<BoxType*>(outLabelPtr); BoxType* outBbox = static_cast<BoxType*>(outBboxPtr); const int N = blockIdx.y; const int validSamples = validSampleCount[N]; typedef ScanItem<DType> ScanItemD; typedef hipcub::BlockReduce<ScanItemD, Threads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int iSample = blockIdx.x; iSample < validSamples; iSample += gridDim.x) { int classOffset = (N * samples + iSample) * NClass; // start from [batch, count, class0] // total IPerThread * blockDim ScanItemD maxItem = {0.0f, -1}; for (int i = start_class_id; i < NClass; i += Threads) { int curIdx = i + threadIdx.x; ScanItemD item = {0.0f, -1}; if (curIdx < NClass) { item.data = inScore[classOffset + curIdx]; item.idx = curIdx; } const int validNum = (NClass - i > Threads ? Threads : NClass - i); ScanItemD aggregate = BlockReduce(temp_storage).Reduce(item, GreaterItem<DType>(), validNum); __syncthreads(); if (aggregate.data > maxItem.data) { maxItem = aggregate; } #if DUBUG_KERNEL if (N == DUBUG_BATCH && threadIdx.x == 0 && iSample < 15 /*&& maxItem.idx >= 32*/) { printf("argMaxGroup N:%d, iSample:%d, maxItem(score:%.3f, idx:%d)validReduceNum:%d\n", N, iSample, (float) maxItem.data, maxItem.idx, validNum); } #endif } const int dstOffset = N * samples + iSample; if (threadIdx.x == 0) { outScore[dstOffset] = maxItem.data; outLabel[dstOffset] = (BoxType) maxItem.idx; outBbox[dstOffset * 4] = inBbox[(classOffset + maxItem.idx) * 4]; outBbox[dstOffset * 4 + 1] = inBbox[(classOffset + maxItem.idx) * 4 + 1]; outBbox[dstOffset * 4 + 2] = inBbox[(classOffset + maxItem.idx) * 4 + 2]; outBbox[dstOffset * 4 + 3] = inBbox[(classOffset + maxItem.idx) * 4 + 3]; } } } struct BlockClassSumPrefix { int total; // Constructor __device__ BlockClassSumPrefix() : total(0) { } // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. __device__ int operator()(int aggregate) { int old = total; total += aggregate; return old; } }; #define LabelShift (DType)(2.5f) #define MinValidScore (DType)(0.01f) template <typename DType> __device__ __forceinline__ DType getKey(DType score, int lable, int NClass) { return (lable < 0 ? 
(DType) 0 : ((DType)(NClass - lable - 1) * LabelShift + score + MinValidScore)); } template <typename DType, typename BoxType> __device__ __forceinline__ void getScoreLable(DType key, int NClass, DType& score, BoxType& lable) { int i = key / LabelShift; score = (key <= MinValidScore ? (DType) 0 : key - (DType) i * LabelShift - MinValidScore); score = dCLAMP(score, (DType) 0, (DType) 1.0); lable = (BoxType)(key <= MinValidScore ? -1 : (NClass - i - 1)); } // blockDim.x : threads // gridDim.x : batch N // validSampleCount INPUT : int [N] // classStartPos OUTPUT: int [N * (Class + 1)], need memset to zero before this kernel // outScore OUTPUT : DType [N * samples] // outLabel OUTPUT : int [N * samples] // outSampleIdx OUTPUT : int [N * samples] // outValidSampleCount : int [N] // IPerThread * Threads >= sample-count #define MaxClassNum 255 template <typename DType, typename BoxType, int Threads = 256, int IPerThread = 4> __global__ void sortPerClass_kernel( // int N, int samples, int NClass, int background, float scoreThreshold, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* classStartPosPtr, void* outScorePtr, void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr) { typedef cub::BlockExchange<DType, Threads, IPerThread> BlockExchangeKey; typedef cub::BlockExchange<int, Threads, IPerThread> BlockExchangeI; typedef cub::BlockRadixSort<DType, Threads, IPerThread, int> BlockRadixSort; typedef hipcub::BlockScan<int, Threads> BlockScanClass; __shared__ union { typename BlockExchangeKey::TempStorage storageKey; typename BlockExchangeI::TempStorage storageI; typename BlockRadixSort::TempStorage storageSort; typename BlockScanClass::TempStorage storageScan; } temp_storage; __shared__ int smemClassCount[MaxClassNum]; assert(NClass < MaxClassNum); assert(IPerThread * Threads >= samples); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); int* classStartPos = static_cast<int*>(classStartPosPtr); DType* outScore = static_cast<DType*>(outScorePtr); BoxType* outLabel = static_cast<BoxType*>(outLabelPtr); int* outSampleIdx = static_cast<int*>(outSampleIdxPtr); int* outValidSampleCount = static_cast<int*>(outValidSampleCountPtr); for (int s = threadIdx.x; s < NClass + 1; s += blockDim.x) { smemClassCount[s] = 0; } int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; DType key[IPerThread]; int iSample[IPerThread]; for (int i = 0; i < IPerThread; ++i) { iSample[i] = -1; key[i] = -1.0f; int curIdx = i * Threads + threadIdx.x; if (curIdx < validSamples) { int label = (int) (inLabel[blockOffset + curIdx]); DType score = inScore[blockOffset + curIdx]; if (label != background && label != -1 && score >= (DType) scoreThreshold) { key[i] = getKey(score, label, NClass); iSample[i] = curIdx; } } } BlockExchangeKey(temp_storage.storageKey).StripedToBlocked(key); __syncthreads(); BlockExchangeI(temp_storage.storageI).StripedToBlocked(iSample); __syncthreads(); BlockRadixSort(temp_storage.storageSort).SortDescendingBlockedToStriped(key, iSample); __syncthreads(); // store Idx cub::StoreDirectStriped<Threads>(threadIdx.x, outSampleIdx + blockOffset, iSample, validSamples); BoxType lable[IPerThread]; DType score[IPerThread]; #pragma unroll for (int i = 0; i < IPerThread; ++i) { getScoreLable(key[i], NClass, score[i], lable[i]); } 
cub::StoreDirectStriped<Threads>(threadIdx.x, outScore + blockOffset, score, validSamples); cub::StoreDirectStriped<Threads>(threadIdx.x, outLabel + blockOffset, lable, validSamples); // final for (int i = 0; i < IPerThread; ++i) { if (lable[i] >= (BoxType) 0) { atomicAdd(&smemClassCount[(int) lable[i]], 1); } } __syncthreads(); int classBlockOffset = N * (NClass + 1); // Exclusive-sum, 1st is 0, last is final sum #if DUBUG_KERNEL if (N == DUBUG_BATCH && threadIdx.x == 0) { printf("sortPerClass(N:%d) final count of each label, valid samples:%d\n", N, validSamples); for (int k = 0; k < NClass; ++k) { if (smemClassCount[k] > 0) printf("Batch:%d, L:%d, count:%d, \n", N, k, smemClassCount[k]); } } __syncthreads(); #endif BlockClassSumPrefix sumPrefix; for (int s = 0; s < NClass; s += blockDim.x) { // s start from block int iClassSamples = 0; int iClass = s + threadIdx.x; if (iClass < NClass) { iClassSamples = smemClassCount[iClass]; } BlockScanClass(temp_storage.storageScan).ExclusiveSum(iClassSamples, iClassSamples, sumPrefix); __syncthreads(); if (iClass < NClass) { classStartPos[classBlockOffset + iClass] = iClassSamples; } } if (threadIdx.x == 0) { classStartPos[classBlockOffset + NClass] = sumPrefix.total; assert(sumPrefix.total <= validSamples); // background data removed. outValidSampleCount[N] = sumPrefix.total; #if DUBUG_KERNEL if (N == DUBUG_BATCH) printf("After sortPerClass, batch:%d valid samples total:%d\n", N, sumPrefix.total); #endif } } template <typename DType> __device__ __forceinline__ BBoxT<DType> readBbox(const BBoxT<DType>* inBbox, int idx) { BBoxT<DType> ret = ((BBoxT<DType>*) (inBbox))[idx]; return ret; } template <typename DType> __device__ __forceinline__ DType boxIoU(const BBoxT<DType>& a, const BBoxT<DType>& b) { BBoxT<DType> overlap = { dMAX(a.y1, b.y1), dMAX(a.x1, b.x1), dMIN(a.y2, b.y2), dMIN(a.x2, b.x2), }; DType oW = overlap.x2 - overlap.x1; DType oH = overlap.y2 - overlap.y1; if (oW < (DType) 0 || oH < (DType) 0) return (DType) 0; DType oA = oW * oH; return (oA / ((a.y2 - a.y1) * (a.x2 - a.x1) + (b.y2 - b.y1) * (b.x2 - b.x1) - oA)); } // PerClassNMS // gridDim.x : batch-N // blockDim.x : Threads // ItemsPerThreads : = divUp(samples, Threads) // outFlagSamples OUT: int [N * samples] template <typename DType, typename BoxType, int Threads = 256, int ItemsPerThreads = 4> __global__ void PerClassNMS_kernel( // int N, int samples, int NClass, const float nmsThreshold, const void* validSampleCountPtr, // const void *inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* classStartsPtr, void* outFlagSamplesPtr) { typedef BBoxT<BoxType> BBox; __shared__ struct { BBox refBox[MaxClassNum]; int endIdx[MaxClassNum]; int refIdx[MaxClassNum + 1]; bool markSamples[Threads * ItemsPerThreads]; int done; } smemClasses; assert(NClass + 1 < MaxClassNum); assert(samples <= Threads * ItemsPerThreads); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); // const DType *inScore = static_cast<const DType *>(inScorePtr); const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* classStarts = static_cast<const int*>(classStartsPtr); int* outFlagSamples = static_cast<int*>(outFlagSamplesPtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; if (threadIdx.x == 0) { smemClasses.done = 0; } BBox curBox[ItemsPerThreads]; int 
label[ItemsPerThreads]; #pragma unroll for (int ite = 0; ite * blockDim.x < validSamples; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples) { label[ite] = (int) inLabel[blockOffset + curIdx]; curBox[ite] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + curIdx]); } else { label[ite] = -1; } smemClasses.markSamples[curIdx] = (label[ite] < 0 ? false : true); } int classBlockOffset = N * (NClass + 1); for (int i = threadIdx.x; i < NClass + 1; i += blockDim.x) { int refIdx = classStarts[classBlockOffset + i]; smemClasses.refIdx[i] = refIdx; smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]); } __syncthreads(); for (int i = threadIdx.x; i < NClass; i += blockDim.x) { int endIdx = smemClasses.refIdx[i + 1]; smemClasses.endIdx[i] = endIdx; if (endIdx == smemClasses.refIdx[i]) { atomicAdd(&smemClasses.done, 1); } } __syncthreads(); #if DUBUG_KERNEL // print info if (N == DUBUG_BATCH && threadIdx.x == 0) { printf("batch:%d, before starting NMS, done count:%d\n", N, smemClasses.done); printf("batch:%d, Total num:%d, startPos:\n", N, validSamples); for (int k = 0; k < NClass; ++k) { if (smemClasses.refIdx[k] != smemClasses.endIdx[k]) { printf("Batch:%d, label:%d [%d : %d], check ref-label:%d\n", N, k, smemClasses.refIdx[k], smemClasses.endIdx[k], (int) inLabel[blockOffset + smemClasses.refIdx[k]]); } } printf("\n"); } __syncthreads(); #endif // class done to check stop point while (smemClasses.done < NClass) { for (int ite = 0; ite * blockDim.x < validSamples; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; int refIdx = -1; int endIdx = -1; if (curIdx < validSamples && smemClasses.markSamples[curIdx]) { if (label[ite] >= 0) { refIdx = smemClasses.refIdx[label[ite]]; endIdx = smemClasses.endIdx[label[ite]]; if (curIdx > refIdx && curIdx < endIdx) { BBox refBox = smemClasses.refBox[label[ite]]; if (boxIoU(refBox, curBox[ite]) > (DType) nmsThreshold) { smemClasses.markSamples[curIdx] = false; } } } } } __syncthreads(); // push refIdx/refBox forward to next mark // only the refIdx thread to push itself. other threads idle for (int i = threadIdx.x; i < NClass; i += blockDim.x) { int refIdx = smemClasses.refIdx[i]; int endIdx = smemClasses.endIdx[i]; if (refIdx < endIdx) { do { ++refIdx; } while (refIdx < endIdx && smemClasses.markSamples[refIdx] == false); smemClasses.refIdx[i] = refIdx; if (refIdx < endIdx) { smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]); } else { atomicAdd(&smemClasses.done, 1); } } } __syncthreads(); } // no need to write all data out for (int segment = 0; segment < validSamples; segment += blockDim.x) { int curIdx = segment + threadIdx.x; if (curIdx < validSamples) { outFlagSamples[blockOffset + curIdx] = (smemClasses.markSamples[curIdx] ? 
1 : 0); } } } // TopKGather // gridDim.x : batch-N // blockDim.x : Threads // ItemsPerThreads : = divUp(samples, Threads) // outDetectionCount : int [N], must be set 0 before kernel #define MaxItemsPerThreads 8 template <typename DType, typename BoxType, int Threads = 256> __global__ void TopKGatherProposal_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outBboxPtr) { typedef BBoxT<BoxType> BBox; typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1; typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2; typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3; typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4; typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5; typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6; typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7; typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8; __shared__ union { typename BlockRadixSort8::TempStorage sort8; typename BlockRadixSort7::TempStorage sort7; typename BlockRadixSort6::TempStorage sort6; typename BlockRadixSort5::TempStorage sort5; typename BlockRadixSort4::TempStorage sort4; typename BlockRadixSort3::TempStorage sort3; typename BlockRadixSort2::TempStorage sort2; typename BlockRadixSort1::TempStorage sort1; } temp_storage; assert(MaxItemsPerThreads * Threads >= samples); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr); BBox* outBbox = static_cast<BBox*>(outBboxPtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; int finalTopK = dMIN(keepTopK, validSamples); int idx[MaxItemsPerThreads]; DType score[MaxItemsPerThreads]; int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x; for (int ite = 0; ite < totalItems; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx]) { idx[ite] = curIdx; score[ite] = inScore[blockOffset + curIdx]; } else { idx[ite] = -1; score[ite] = 0.0f; } } switch (totalItems) { case 0: break; case 1: BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx); break; case 2: BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx); break; case 3: BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx); break; case 4: BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx); break; case 5: BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx); break; case 6: BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx); break; case 7: BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx); break; case 8: BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx); break; default: assert(false); } __syncthreads(); int outBlockOffset = N * keepTopK; int 
topkItems = (keepTopK + (Threads - 1)) / Threads; for (int i = 0; i < topkItems; ++i) { int curI = i * blockDim.x + threadIdx.x; if (curI < keepTopK) { BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f}; if (curI < finalTopK && idx[i] >= 0 && score[i] > MinValidScore) { oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]]; } ((BBox*) outBbox)[outBlockOffset + curI] = oB; } } } #define MaxItemsPerThreads 8 template <typename DType, typename BoxType, int Threads = 256> __global__ void TopKGather_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outDetectionPtr) { typedef BBoxT<BoxType> BBox; typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1; typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2; typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3; typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4; typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5; typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6; typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7; typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8; __shared__ union { typename BlockRadixSort8::TempStorage sort8; typename BlockRadixSort7::TempStorage sort7; typename BlockRadixSort6::TempStorage sort6; typename BlockRadixSort5::TempStorage sort5; typename BlockRadixSort4::TempStorage sort4; typename BlockRadixSort3::TempStorage sort3; typename BlockRadixSort2::TempStorage sort2; typename BlockRadixSort1::TempStorage sort1; } temp_storage; assert(MaxItemsPerThreads * Threads >= samples); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); // InLabel keeps INT32 const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr); DType* outDetections = static_cast<DType*>(outDetectionPtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; int finalTopK = dMIN(keepTopK, validSamples); int idx[MaxItemsPerThreads]; DType score[MaxItemsPerThreads]; int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x; for (int ite = 0; ite < totalItems; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx]) { idx[ite] = curIdx; score[ite] = inScore[blockOffset + curIdx]; } else { idx[ite] = -1; score[ite] = 0.0f; } } switch (totalItems) { case 0: break; case 1: BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx); break; case 2: BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx); break; case 3: BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx); break; case 4: BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx); break; case 5: BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx); break; case 6: BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) 
idx); break; case 7: BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx); break; case 8: BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx); break; default: assert(false); } __syncthreads(); int outBlockOffset = N * keepTopK; int topkItems = (keepTopK + (Threads - 1)) / Threads; for (int i = 0; i < topkItems; ++i) { int curI = i * blockDim.x + threadIdx.x; if (curI < keepTopK) { BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f}; DType oS = 0.0f; BoxType oL = -1; if (curI < finalTopK && idx[i] >= 0 && score[i] > MinValidScore) { oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]]; oS = score[i]; oL = (BoxType) inLabel[blockOffset + idx[i]]; } outDetections[(outBlockOffset + curI) * 6] = oB.y1; outDetections[(outBlockOffset + curI) * 6 + 1] = oB.x1; outDetections[(outBlockOffset + curI) * 6 + 2] = oB.y2; outDetections[(outBlockOffset + curI) * 6 + 3] = oB.x2; outDetections[(outBlockOffset + curI) * 6 + 4] = oL; outDetections[(outBlockOffset + curI) * 6 + 5] = oS; } } } RefineDetectionWorkSpace::RefineDetectionWorkSpace( const int batchSize, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType) : argMaxScoreDims(sampleCount, 1) , argMaxBboxDims(sampleCount, 4) , argMaxLabelDims(sampleCount, 1) , sortClassScoreDims(sampleCount, 1) , sortClassLabelDims(sampleCount, 1) , sortClassSampleIdxDims(sampleCount + 1, 1) , sortClassPosDims(param.numClasses + 1, 1) , sortNMSMarkDims(sampleCount, 1) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // arMaxScore : [N, samples] : m_Type argMaxScoreOffset = sumSize; sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize); argMaxBboxOffset = sumSize; // argMaxBbox : [N, samples, 4] : m_Type sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize); argMaxLabelOffset = sumSize; // argMaxLabel : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassScoreOffset = sumSize; // sortClassScore : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize); sortClassLabelOffset = sumSize; // sortClassLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize); sortClassSampleIdxOffset = sumSize; // sortClassSampleIdx : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassValidCountOffset = sumSize; // sortClassValidCount : [N, 1] : kINT32 sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassPosOffset = sumSize; // sortClassPos : [N, numClasses+1] : kINT32 sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortNMSMarkOffset = sumSize; // sortNMSMark : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); totalSize = sumSize; } ProposalWorkSpace::ProposalWorkSpace(const int batchSize, const int inputCnt, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType) : preRefineScoreDims(inputCnt, 1) , preRefineSortedScoreDims(inputCnt, 1) , preRefineBboxDims(inputCnt, 4) , argMaxScoreDims(sampleCount, 1) , argMaxBboxDims(sampleCount, 4) , 
argMaxLabelDims(sampleCount, 1) , sortClassScoreDims(sampleCount, 1) , sortClassLabelDims(sampleCount, 1) , sortClassSampleIdxDims(sampleCount, 1) , sortClassPosDims(param.numClasses + 1, 1) , sortNMSMarkDims(sampleCount, 1) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // temp storage size for sorting scores tempStorageOffset = sumSize; sumSize += (1 << 23) * batchSize; // preRefineScore : [N, inputcnt, 1] // extracted foreground score from inputs[0] preRefineScoreOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineScoreDims) * typeSize(type) * batchSize); // preRefineSortedScore: [N, inputcnt, 1] preRefineSortedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize); // preRefineBbox: [N, inputcnt, 4] // sorted bbox preRefineBboxOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize); // arMaxScore : [N, samples] : m_Type argMaxScoreOffset = sumSize; sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize); argMaxBboxOffset = sumSize; // argMaxBbox : [N, samples, 4] : m_Type sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize); argMaxLabelOffset = sumSize; // argMaxLabel : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassScoreOffset = sumSize; // sortClassScore : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize); sortClassLabelOffset = sumSize; // sortClassLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize); sortClassSampleIdxOffset = sumSize; // sortClassSampleIdx : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassValidCountOffset = sumSize; // sortClassValidCount : [N, 1] : kINT32 sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassPosOffset = sumSize; // sortClassPos : [N, numClasses+1] : kINT32 sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortNMSMarkOffset = sumSize; // sortNMSMark : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); totalSize = sumSize; } MultilevelProposeROIWorkSpace::MultilevelProposeROIWorkSpace(const int batchSize, const int inputCnt, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType) : preRefineSortedScoreDims(inputCnt, 1) , preRefineBboxDims(inputCnt, 4) , argMaxScoreDims(sampleCount, 1) , argMaxBboxDims(sampleCount, 4) , argMaxLabelDims(sampleCount, 1) , sortClassScoreDims(sampleCount, 1) , sortClassLabelDims(sampleCount, 1) , sortClassSampleIdxDims(sampleCount+1, 1) , sortClassPosDims(param.numClasses + 1, 1) , sortNMSMarkDims(sampleCount, 1) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // temp storage size for sorting scores tempStorageOffset = sumSize; sumSize += (1 << 23) * batchSize; // preRefineSortedScore: [N, inputcnt, 1] preRefineSortedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize); // preRefineBbox: [N, inputcnt, 4] // sorted bbox preRefineBboxOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize); // 
argMaxScore : [N, samples] : m_Type argMaxScoreOffset = sumSize; sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize); argMaxBboxOffset = sumSize; // argMaxBbox : [N, samples, 4] : m_Type sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize); argMaxLabelOffset = sumSize; // argMaxLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(type) * batchSize); sortClassScoreOffset = sumSize; // sortClassScore : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize); sortClassLabelOffset = sumSize; // sortClassLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize); sortClassSampleIdxOffset = sumSize; // sortClassSampleIdx : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassValidCountOffset = sumSize; // sortClassValidCount : [N, 1] : kINT32 sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassPosOffset = sumSize; // sortClassPos : [N, numClasses+1] : kINT32 sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortNMSMarkOffset = sumSize; // sortNMSMark : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); totalSize = sumSize; } ConcatTopKWorkSpace::ConcatTopKWorkSpace(const int batchSize, const int concatCnt, const int topK,const nvinfer1::DataType inType) : concatedScoreDims(concatCnt*topK, 1) , concatedBBoxDims(concatCnt*topK, 4) , sortedScoreDims(concatCnt*topK, 1) , sortedBBoxDims(concatCnt*topK, 4) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // temp storage size for sorting scores tempStorageOffset = sumSize; sumSize += (1 << 23) * batchSize; // concatedScoreOffset: [N, concatCnt*topK, 1] concatedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(concatedScoreDims) * typeSize(type) * batchSize); // concatedBBoxOffset: [N, concatCnt*topK, 4] concatedBBoxOffset = sumSize; sumSize += AlignMem(dimVolume(concatedBBoxDims) * typeSize(type) * batchSize); // sortedScoreOffset: [N, concatCnt * topK, 1] sortedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(sortedScoreDims) * typeSize(type) * batchSize); // sortedBBoxOffset: [N, concatCnt * topK, 4] sortedBBoxOffset = sumSize; sumSize += AlignMem(dimVolume(sortedBBoxDims) * typeSize(type) * batchSize); totalSize = sumSize; } template <int Threads> hipError_t argMaxGroup(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox) { int maxGridX = dMIN(samples, 512 / N); dim3 gridDim = {(unsigned int) nAlignDown(maxGridX, 32), (unsigned int) N, 1}; dim3 threads = {Threads, 1, 1}; switch (dtype) { case nvinfer1::DataType::kFLOAT: hipLaunchKernelGGL(( argMaxGroup_kernel<float, float, Threads>), dim3(gridDim), dim3(threads), 0, stream, samples, 0, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return hipGetLastError(); } template <int Threads> hipError_t argMaxWOBackground(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* 
outLabel, void* outBbox) { int maxGridX = dMIN(samples, 512 / N); dim3 gridDim = {(unsigned int) nAlignDown(maxGridX, 32), (unsigned int) N, 1}; dim3 threads = {Threads, 1, 1}; switch (dtype) { case nvinfer1::DataType::kFLOAT: hipLaunchKernelGGL(( argMaxGroup_kernel<float, float, Threads>), dim3(gridDim), dim3(threads), 0, stream, samples, 1, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return hipGetLastError(); } template <int Threads, int ItermPerThreads> hipError_t sortPerClass(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, int background, float scoreThreshold, const void* inSampleValidCount, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* outclassStartPosPtr, void* outScorePtr, void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: hipLaunchKernelGGL(( sortPerClass_kernel<float, float, Threads, ItermPerThreads>), dim3(blocks), dim3(threads), 0, stream, samples, NClass, background, scoreThreshold, inSampleValidCount, inScorePtr, inLabelPtr, inBboxPtr, outclassStartPosPtr, outScorePtr, outLabelPtr, outSampleIdxPtr, outValidSampleCountPtr); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return hipGetLastError(); }; template <int Threads> hipError_t PerClassNMS(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, const float nmsThreshold, const void* validSampleCount, // const void *inScore, const void* inLabel, const void* inBbox, const void* inBboxRefIdx, const void* classStarts, void* outFlagSamples) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: hipLaunchKernelGGL(( PerClassNMS_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, NClass, nmsThreshold, validSampleCount, inLabel, inBbox, inBboxRefIdx, classStarts, outFlagSamples); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return hipGetLastError(); } template <int Threads> hipError_t KeepTopKGather(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outDetections, int proposal) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: if (proposal) { hipLaunchKernelGGL(( TopKGatherProposal_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outDetections); } else { hipLaunchKernelGGL(( TopKGather_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outDetections); } break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return hipGetLastError(); } // TopKGather For TLT RPN Proposal // gridDim.x : batch-N // blockDim.x : Threads // ItemsPerThreads : = divUp(samples, Threads) // outDetectionCount : int [N], must be set 0 before kernel #define MaxItemsPerThreads 8 template <typename DType, typename BoxType, int Threads = 256> __global__ void TopKGatherBoxScore_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* 
inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outScorePtr, void* outBboxPtr) { typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1; typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2; typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3; typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4; typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5; typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6; typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7; typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8; __shared__ union { typename BlockRadixSort8::TempStorage sort8; typename BlockRadixSort7::TempStorage sort7; typename BlockRadixSort6::TempStorage sort6; typename BlockRadixSort5::TempStorage sort5; typename BlockRadixSort4::TempStorage sort4; typename BlockRadixSort3::TempStorage sort3; typename BlockRadixSort2::TempStorage sort2; typename BlockRadixSort1::TempStorage sort1; } temp_storage; assert(MaxItemsPerThreads * Threads >= samples); typedef BBoxT<BoxType> BBox; const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr); BBox* outBbox = static_cast<BBox*>(outBboxPtr); DType* outScore = static_cast<DType*>(outScorePtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; int finalTopK = dMIN(keepTopK, validSamples); int idx[MaxItemsPerThreads]; DType score[MaxItemsPerThreads]; int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x; for (int ite = 0; ite < totalItems; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx]) { idx[ite] = curIdx; score[ite] = inScore[blockOffset + curIdx]; } else { idx[ite] = -1; score[ite] = 0.0f; } } switch (totalItems) { case 0: break; case 1: BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx); break; case 2: BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx); break; case 3: BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx); break; case 4: BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx); break; case 5: BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx); break; case 6: BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx); break; case 7: BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx); break; case 8: BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx); break; default: assert(false); } __syncthreads(); int outBlockOffset = N * keepTopK; int topkItems = (keepTopK + (Threads - 1)) / Threads; for (int i = 0; i < topkItems; ++i) { int curI = i * blockDim.x + threadIdx.x; if (curI < keepTopK) { BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f}; DType oS = 0.0f; if (curI < finalTopK && idx[i] >= 0) { oB = ((BBox*) inBbox)[blockOffset + 
inBboxRefIdx[blockOffset + idx[i]]]; oS = score[i]; } ((BBox*) outBbox)[outBlockOffset + curI] = oB; outScore[outBlockOffset + curI] = oS; } } } template <int Threads> hipError_t KeepTopKGatherBoxScore(hipStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outScores, void* outDetections, int proposal) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: if (proposal) { hipLaunchKernelGGL(( TopKGatherBoxScore_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outScores, outDetections); } else { hipLaunchKernelGGL(( TopKGather_kernel<float, float, Threads>), dim3(blocks), dim3(threads), 0, stream, samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outDetections); } break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return hipGetLastError(); } hipError_t RefineBatchClassNMS(hipStream_t stream, int N, int samples, nvinfer1::DataType dtype, const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections) { int NClass = param.numClasses; int8_t* wsPtr = static_cast<int8_t*>(workspace); void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset; hipError_t status = hipSuccess; CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); if (NClass > 1) { // multiple classes status = argMaxGroup<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes assert(status == hipSuccess); CUASSERT(status); } else { // Only one class argMaxScorePtr = const_cast<void*>(inScores); argMaxBBoxPtr = const_cast<void*>(inDelta); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } status = ApplyDelta2Bboxes(stream, N, samples, inROI, argMaxBBoxPtr, argMaxBBoxPtr); assert(status == hipSuccess); if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, 
param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return hipErrorLaunchFailure; } assert(status == hipSuccess); CUASSERT(status); status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); assert(status == hipSuccess); CUASSERT(status); status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0); assert(status == hipSuccess); CUASSERT(status); return status; } hipError_t DetectionPostProcess(hipStream_t stream, int N, int samples, const float* regWeight, const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections) { int NClass = param.numClasses; int8_t* wsPtr = static_cast<int8_t*>(workspace); void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset; hipError_t status = hipSuccess; CUASSERT(hipMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(float), stream)); CUASSERT(hipMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4* sizeof(float), stream)); CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); CUASSERT(hipMemsetAsync(sortClassPosPtr, 0, N * (NClass+1) * sizeof(int), stream)); CUASSERT(hipMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream)); if (NClass > 1) { // multiple classes status = argMaxWOBackground<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes assert(status == hipSuccess); CUASSERT(status); } else { // Only one class argMaxScorePtr = const_cast<void*>(inScores); argMaxBBoxPtr = const_cast<void*>(inDelta); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } status = DecodeBBoxes(stream, N, samples, regWeight, inputHeight, inputWidth, inROI, argMaxBBoxPtr, 
argMaxBBoxPtr); assert(status == hipSuccess); if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return hipErrorLaunchFailure; } assert(status == hipSuccess); CUASSERT(status); status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); CUASSERT(status); status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0); CUASSERT(status); return status; } struct BF_SCORE { float bg, fg; }; // in_scores : [N, samples, 2] // output_score : [N, samples, 1] __global__ void extract_fg_kernel(int samples, const void* in_scores, void* output_score) { const BF_SCORE* in = static_cast<const BF_SCORE*>(in_scores); float* out = static_cast<float*>(output_score); int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; out[blockOffset + cur_id] = in[blockOffset + cur_id].fg; } } __global__ void set_offset_kernel(int stride, int size, int* output) { // One block, because batch size shouldn't be too large. 
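    // The callers below launch this kernel with stride = the per-batch item count
    // (inputCnt, or topK * featureCnt in ConcatTopK) and size = N + 1, so a single
    // block writes the exclusive segment boundaries consumed by
    // hipcub::DeviceSegmentedRadixSort:
    //   output = {0, stride, 2 * stride, ..., N * stride}
    // and segment n of the per-batch descending sort covers [output[n], output[n + 1]).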
for (int i = threadIdx.x; i < size; i += blockDim.x) { output[i] = i * stride; } } __global__ void resample_kernel(int orig_size, int sample_size, const void* orig_score_ptr, const void* orig_bbox_ptr, void* sampled_score_ptr, void* sampled_bbox_ptr) { const float* in_score = static_cast<const float*>(orig_score_ptr); const BBoxT<float>* in_bbox = static_cast<const BBoxT<float>*>(orig_bbox_ptr); float* out_score = static_cast<float*>(sampled_score_ptr); BBoxT<float>* out_bbox = static_cast<BBoxT<float>*>(sampled_bbox_ptr); int N = blockIdx.x; int blockOffset_in = N * orig_size; int blockOffset_out = N * sample_size; int realSampleCnt = dMIN(sample_size, orig_size); int totalItems = (realSampleCnt + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; if (cur_id < realSampleCnt) { out_score[blockOffset_out + cur_id] = in_score[blockOffset_in + cur_id]; out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id]; } } } hipError_t proposalRefineBatchClassNMS(hipStream_t stream, int N, int inputCnt, int samples, nvinfer1::DataType dtype, const RefineNMSParameters& param, const ProposalWorkSpace& proposalOffset, void* workspace, const void* inScores, //[N, inputcnt, 2] const void* inDelta, //[N, inputcnt, 4] const void* inCountValid, const void* inAnchors, //[N, inputcnt, 4] void* outProposals) { int8_t* wsPtr = static_cast<int8_t*>(workspace); void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset; void* preRefineScorePtr = wsPtr + proposalOffset.preRefineScoreOffset; void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset; void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset; void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset; hipError_t status = hipSuccess; CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); // extract foreground score hipLaunchKernelGGL(( extract_fg_kernel), dim3(N), dim3(dMIN(inputCnt, 1024)), 0, stream, inputCnt, inScores, preRefineScorePtr); CUASSERT(hipGetLastError()); // Here, inDelta are converted to normalize coordinates based on anchors status = ApplyDelta2Bboxes(stream, N, inputCnt, inAnchors, inDelta, const_cast<void*>(inDelta)); CUASSERT(status); // sort the score // d_key_in: preRefineScorePtr [N, inputCnt, 1] // d_key_out: preRefineSortedScorePtr // d_values_in: inDelta [N, inputCnt, 4] // d_values_out: preRefineBboxPtr // num_items: inputCnt*N // num_segments: N // offsets: [0, inputCnt, inputCnt*2, ..., ] int* offsets = static_cast<int*>(tempStoragePtr); hipLaunchKernelGGL(( set_offset_kernel), dim3(1), dim3(1024), 0, stream, inputCnt, N + 1, offsets); assert(hipGetLastError() == hipSuccess); tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1)); size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) preRefineScorePtr, (float*) preRefineSortedScorePtr, 
(BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); assert((1 << 23) * (size_t)N > temp_storage_bytes); hipcub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) preRefineScorePtr, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); int NClass = param.numClasses; assert(NClass == 1); if (NClass == 1) { // Only one class hipLaunchKernelGGL(( resample_kernel), dim3(N), dim3(dMIN(samples, 1024)), 0, stream, inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return hipErrorLaunchFailure; } CUASSERT(status); status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); CUASSERT(status); status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outProposals, 1); CUASSERT(status); return status; } hipError_t MultilevelPropose(hipStream_t stream, int N, int inputCnt, int samples, const float* regWeight, const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param, const MultilevelProposeROIWorkSpace& proposalOffset, void* workspace, const void* inScore, //[N, inputcnt, 1] const void* inDelta, //[N, inputcnt, 4] void* inCountValid, const void* inAnchors, //[N, inputcnt, 4] void* outScore, void* outBbox) { int8_t* wsPtr = static_cast<int8_t*>(workspace); void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset; void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset; void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset; void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + 
proposalOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset; hipError_t status = hipSuccess; int NClass = param.numClasses; assert(NClass == 1); CUASSERT(hipMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(float), stream)); CUASSERT(hipMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4* sizeof(float), stream)); CUASSERT(hipMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); CUASSERT(hipMemsetAsync(sortClassPosPtr, 0, N * (NClass+1) * sizeof(int), stream)); CUASSERT(hipMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream)); CUASSERT(hipGetLastError()); // Here, inDelta are converted to normalize coordinates based on anchors status = DecodeBBoxes(stream, N, inputCnt, regWeight, inputHeight, inputWidth, inAnchors, inDelta, const_cast<void*>(inDelta)); CUASSERT(hipGetLastError()); // sort the score // d_key_in: preRefineScorePtr [N, inputCnt, 1] // d_key_out: preRefineSortedScorePtr // d_values_in: inDelta [N, inputCnt, 4] // d_values_out: preRefineBboxPtr // num_items: inputCnt*N // num_segments: N // offsets: [0, inputCnt, inputCnt*2, ..., ] int* offsets = static_cast<int*>(tempStoragePtr); hipLaunchKernelGGL(( set_offset_kernel), dim3(1), dim3(1024), 0, stream, inputCnt, N + 1, offsets); CUASSERT(hipGetLastError()); tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1)); size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) inScore, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); CUASSERT(hipGetLastError()); assert((1 << 23) * N > temp_storage_bytes); hipcub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) inScore, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); CUASSERT(hipGetLastError()); if (NClass == 1) { // Only one class hipLaunchKernelGGL(( resample_kernel), dim3(N), dim3(dMIN(samples, 1024)), 0, stream, inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr); CUASSERT(hipGetLastError()); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { hipLaunchKernelGGL(( resetMemValue_kernel<float>), dim3(blocks), dim3(threads), 0, stream, argMaxLabelPtr, N * samples, 0); CUASSERT(hipGetLastError()); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, 
sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return hipErrorLaunchFailure; } CUASSERT(hipGetLastError()); status = PerClassNMS<1024>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); CUASSERT(hipGetLastError()); status = KeepTopKGatherBoxScore<1024>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outScore, outBbox, 1); CUASSERT(hipGetLastError()); return status; } struct BBOX { float y1, x1, y2, x2; }; struct DELTA { float dy, dx, logdh, logdw; }; __global__ void decode_bboxes_kernel(int samples, const void* anchors, const void* delta, const float* regWeight, const float inputHeight, const float inputWidth, void* outputBbox, float bboxClipThresh) { const BBOX* anchors_in = static_cast<const BBOX*>(anchors); const DELTA* delta_in = static_cast<const DELTA*>(delta); BBOX* bbox_out = static_cast<BBOX*>(outputBbox); int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; if (cur_id < samples) { BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id]; // convert yxyx -> cyxhw // cy, cx, h, w /*BBOX cur_anchor_cyxhw;*/ float cur_anchor_h = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1 + 1.0); float cur_anchor_w = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1 + 1.0); //w float cur_anchor_yc = cur_anchor_yxyx.y1 + cur_anchor_h * 0.5; //cy float cur_anchor_xc = cur_anchor_yxyx.x1 + cur_anchor_w * 0.5; //cx DELTA cur_delta = delta_in[blockOffset + cur_id]; // divided by regWeight cur_delta.dy /= regWeight[0]; cur_delta.dx /= regWeight[1]; cur_delta.logdh /= regWeight[2]; cur_delta.logdw /= regWeight[3]; cur_delta.logdh = dMIN(cur_delta.logdh, bboxClipThresh); cur_delta.logdw = dMIN(cur_delta.logdw, bboxClipThresh); // apply delta float decoded_box_yc = cur_anchor_yc + cur_delta.dy * cur_anchor_h; float decoded_box_xc = cur_anchor_xc + cur_delta.dx * cur_anchor_w; float decoded_box_h = expf(cur_delta.logdh) * cur_anchor_h; float decoded_box_w = expf(cur_delta.logdw) * cur_anchor_w; float decoded_box_ymin = decoded_box_yc - 0.5 * decoded_box_h; float decoded_box_xmin = decoded_box_xc - 0.5 * decoded_box_w; float decoded_box_ymax = decoded_box_ymin + decoded_box_h - 1.0; float decoded_box_xmax = decoded_box_xmin + decoded_box_w - 1.0; // clip bbox: a more precision clip method based on real window could be implemented decoded_box_ymin = dMAX(dMIN(decoded_box_ymin, inputHeight - 1.0), 0.0); decoded_box_xmin = dMAX(dMIN(decoded_box_xmin, inputWidth - 1.0), 0.0); decoded_box_ymax = dMAX(dMIN(decoded_box_ymax, inputHeight - 1.0), 0.0); decoded_box_xmax = dMAX(dMIN(decoded_box_xmax, inputWidth - 1.0), 0.0); bbox_out[blockOffset + cur_id].y1 = decoded_box_ymin; bbox_out[blockOffset + cur_id].x1 = decoded_box_xmin; bbox_out[blockOffset + cur_id].y2 = decoded_box_ymax; bbox_out[blockOffset + cur_id].x2 = decoded_box_xmax; } } } hipError_t DecodeBBoxes(hipStream_t stream, int N, 
int samples, // number of anchors per image const float* regWeight, const float inputHeight, const float inputWidth, const void* anchors, // [N, anchors, (y1, x1, y2, x2)] const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)]) void* outputBbox //[N, anchors, (y1, x1, y2, x2)] ) { int blocks = N; int threads = dMIN(samples, 1024); // delta multiply bbox_std // apply delta steps: // cy = anchor_cy + dy*height // cx = anchor_cx + dx*weight // h = exp(dh)*anchor_h // w = exp(dw)*anchor_w // clip the bbox in absolute coordinates float bboxClipThresh = log(1000.0f/16.0f); hipLaunchKernelGGL(( decode_bboxes_kernel), dim3(blocks), dim3(threads), 0, stream, samples, anchors, delta, regWeight, inputHeight, inputWidth, outputBbox, bboxClipThresh); return hipGetLastError(); } __global__ void apply_delta_kernel(int samples, const void* anchors, const void* delta, void* outputBbox) { const BBOX* anchors_in = static_cast<const BBOX*>(anchors); const DELTA* delta_in = static_cast<const DELTA*>(delta); BBOX* bbox_out = static_cast<BBOX*>(outputBbox); int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id]; // convert yxyx -> cyxhw // cy, cx, h, w BBOX cur_anchor_cyxhw; cur_anchor_cyxhw.y1 = (cur_anchor_yxyx.y1 + cur_anchor_yxyx.y2) / 2; cur_anchor_cyxhw.x1 = (cur_anchor_yxyx.x1 + cur_anchor_yxyx.x2) / 2; cur_anchor_cyxhw.y2 = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1); cur_anchor_cyxhw.x2 = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1); DELTA cur_delta = delta_in[blockOffset + cur_id]; // multiply std_dev cur_delta.dy *= 0.1; cur_delta.dx *= 0.1; cur_delta.logdh *= 0.2; cur_delta.logdw *= 0.2; // apply delta cur_anchor_cyxhw.y1 += cur_delta.dy * cur_anchor_cyxhw.y2; cur_anchor_cyxhw.x1 += cur_delta.dx * cur_anchor_cyxhw.x2; cur_anchor_cyxhw.y2 *= expf(cur_delta.logdh); cur_anchor_cyxhw.x2 *= expf(cur_delta.logdw); cur_anchor_yxyx.y1 = cur_anchor_cyxhw.y1 - 0.5 * cur_anchor_cyxhw.y2; cur_anchor_yxyx.x1 = cur_anchor_cyxhw.x1 - 0.5 * cur_anchor_cyxhw.x2; cur_anchor_yxyx.y2 = cur_anchor_yxyx.y1 + cur_anchor_cyxhw.y2; cur_anchor_yxyx.x2 = cur_anchor_yxyx.x1 + cur_anchor_cyxhw.x2; // clip bbox: a more precision clip method based on real window could be implemented cur_anchor_yxyx.y1 = dMAX(dMIN(cur_anchor_yxyx.y1, 1.0), 0.0); cur_anchor_yxyx.x1 = dMAX(dMIN(cur_anchor_yxyx.x1, 1.0), 0.0); cur_anchor_yxyx.y2 = dMAX(dMIN(cur_anchor_yxyx.y2, 1.0), 0.0); cur_anchor_yxyx.x2 = dMAX(dMIN(cur_anchor_yxyx.x2, 1.0), 0.0); bbox_out[blockOffset + cur_id].y1 = cur_anchor_yxyx.y1; bbox_out[blockOffset + cur_id].x1 = cur_anchor_yxyx.x1; bbox_out[blockOffset + cur_id].y2 = cur_anchor_yxyx.y2; bbox_out[blockOffset + cur_id].x2 = cur_anchor_yxyx.x2; } } hipError_t ApplyDelta2Bboxes(hipStream_t stream, int N, int samples, // number of anchors per image const void* anchors, // [N, anchors, (y1, x1, y2, x2)] const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)]) void* outputBbox //[N, anchors, (y1, x1, y2, x2)] ) { int blocks = N; int threads = dMIN(samples, 1024); // delta multiply bbox_std // apply delta steps: // cy = anchor_cy + dy*height // cx = anchor_cx + dx*weight // h = exp(dh)*anchor_h // w = exp(dw)*anchor_w // clip the bbox hipLaunchKernelGGL(( apply_delta_kernel), dim3(blocks), dim3(threads), 0, stream, samples, anchors, delta, outputBbox); return hipGetLastError(); } template <typename Tfeat> __device__ inline Tfeat 
interpolateBilinear(const Tfeat* src, xy_t srcDims, float y, float x) { const int y0 = static_cast<int>(y); const float yAlpha = y - static_cast<float>(y0); const int x0 = static_cast<int>(x); const float xAlpha = x - static_cast<float>(x0); assert(y0 < srcDims.y); assert(x0 < srcDims.x); const int y1 = (yAlpha == 0) ? y0 : y0 + 1; // ceil const int x1 = (xAlpha == 0) ? x0 : x0 + 1; // ceil assert(y1 < srcDims.y); assert(x1 < srcDims.x); const float src00 = src[(y0) *srcDims.x + (x0)]; const float src01 = src[(y0) *srcDims.x + (x1)]; const float src10 = src[(y1) *srcDims.x + (x0)]; const float src11 = src[(y1) *srcDims.x + (x1)]; const float src0 = src00 * (1 - xAlpha) + src01 * xAlpha; const float src1 = src10 * (1 - xAlpha) + src11 * xAlpha; return src0 * (1 - yAlpha) + src1 * yAlpha; } template <typename Trois, typename Tfeat> __global__ void roiAlign_kernel(int featureCount, int roiCount, float threshold, const Trois* rois, const Tfeat* P2, const xy_t P2dims, const Tfeat* P3, const xy_t P3dims, const Tfeat* P4, const xy_t P4dims, const Tfeat* P5, const xy_t P5dims, Tfeat* pooled, const xy_t poolDims) { const int batch = blockIdx.x; const int feature = blockIdx.y; for (int roiIdx = threadIdx.x; roiIdx < roiCount; roiIdx += blockDim.x) { const Trois* roi = rois + 4 * (batch * roiCount + roiIdx); const float y1 = roi[0]; const float x1 = roi[1]; const float y2 = roi[2]; const float x2 = roi[3]; if (!(0 <= y1 && y1 <= 1 && 0 <= x1 && x1 <= 1 && 0 <= y2 && y2 <= 1 && 0 <= x2 && x2 <= 1 && y1 < y2 && x1 < x2)) { continue; } else { } const float hw = (y2 - y1) * (x2 - x1); const Tfeat* src = P2; xy_t srcDims = P2dims; int iP = 2; if (hw > threshold) { src = P3; srcDims = P3dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P4; srcDims = P4dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P5; srcDims = P5dims; ++iP; } src += srcDims.x * srcDims.y * (batch * featureCount + feature); Tfeat* dst = pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature); const float yStart = y1 * (srcDims.y - 1); const float xStart = x1 * (srcDims.x - 1); const float yEnd = y2 * (srcDims.y - 1); const float xEnd = x2 * (srcDims.x - 1); const float yDelta = (yEnd - yStart) / (poolDims.y - 1); const float xDelta = (xEnd - xStart) / (poolDims.x - 1); for (int yy = 0; yy < poolDims.y; ++yy) { const float ySample = min(yStart + yDelta * yy, yEnd); for (int xx = 0; xx < poolDims.x; ++xx) { const float xSample = min(xStart + xDelta * xx, xEnd); float result = interpolateBilinear(src, srcDims, ySample, xSample); *dst = result; dst++; } } } } hipError_t roiAlign(hipStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold, const void* rois, const void* const layers[], const xy_t* layerDims, void* pooled, const xy_t poolDims) { const dim3 blocks(batchSize, featureCount); const int threads(256); hipLaunchKernelGGL(( roiAlign_kernel), dim3(blocks), dim3(threads), 0, stream, featureCount, roiCount, firstThreshold, static_cast<const float*>(rois), static_cast<const float*>(layers[0]), layerDims[0], static_cast<const float*>(layers[1]), layerDims[1], static_cast<const float*>(layers[2]), layerDims[2], static_cast<const float*>(layers[3]), layerDims[3], static_cast<float*>(pooled), poolDims); return hipGetLastError(); } template <typename Trois, typename Tfeat> __global__ void roiAlignHalfCenter_kernel(int featureCount, int roiCount, float threshold, int inputHeight, int inputWidth, const Trois* rois, const Tfeat* P2, const xy_t P2dims, const 
Tfeat* P3, const xy_t P3dims, const Tfeat* P4, const xy_t P4dims, const Tfeat* P5, const xy_t P5dims, const Tfeat* P6, const xy_t P6dims, Tfeat* pooled, const xy_t poolDims) { const int batch = blockIdx.x; const int feature = blockIdx.y; for (int roiIdx = threadIdx.x; roiIdx < roiCount; roiIdx += blockDim.x) { const Trois* roi = rois + 4 * (batch * roiCount + roiIdx); const float y1 = roi[0]; const float x1 = roi[1]; const float y2 = roi[2]; const float x2 = roi[3]; if (!(0 <= y1 && y1 <= inputHeight && 0 <= x1 && x1 <= inputWidth && 0 <= y2 && y2 <= inputHeight && 0 <= x2 && x2 <= inputWidth && y1 < y2 && x1 < x2)) { continue; } else { } const float hw = (y2 - y1) * (x2 - x1); const Tfeat* src = P2; xy_t srcDims = P2dims; int iP = 2; if (hw > threshold) { src = P3; srcDims = P3dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P4; srcDims = P4dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P5; srcDims = P5dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P6; srcDims = P6dims; ++iP; } src += srcDims.x * srcDims.y * (batch * featureCount + feature); Tfeat* dst = pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature); float scale_to_level = 1.0f; for(int i = 0; i < iP; i++) { scale_to_level *= 2.0f; } const float yStart = y1 / scale_to_level; const float xStart = x1 / scale_to_level; const float yEnd = y2 / scale_to_level; const float xEnd = x2 / scale_to_level; const float yDelta = (yEnd - yStart) / (poolDims.y); const float xDelta = (xEnd - xStart) / (poolDims.x); for (int yy = 0; yy < poolDims.y; ++yy) { const float ySample = dMIN(dMAX(yStart + yDelta * (yy + 0.5), 0.0f), srcDims.y - 1.0f); for (int xx = 0; xx < poolDims.x; ++xx) { const float xSample = dMIN(dMAX(xStart + xDelta * (xx + 0.5), 0.0f), srcDims.x - 1.0f); float result = interpolateBilinear(src, srcDims, ySample, xSample); *dst = result; dst++; } } } } hipError_t roiAlignHalfCenter(hipStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold, int inputHeight, int inputWidth, const void* rois, const void* const layers[], const xy_t* layerDims, void* pooled, const xy_t poolDims) { const dim3 blocks(batchSize, featureCount); const int threads(256); hipLaunchKernelGGL(( roiAlignHalfCenter_kernel), dim3(blocks), dim3(threads), 0, stream, featureCount, roiCount, firstThreshold, inputHeight, inputWidth, static_cast<const float*>(rois), static_cast<const float*>(layers[0]), layerDims[0], static_cast<const float*>(layers[1]), layerDims[1], static_cast<const float*>(layers[2]), layerDims[2], static_cast<const float*>(layers[3]), layerDims[3], static_cast<const float*>(layers[4]), layerDims[4], static_cast<float*>(pooled), poolDims); return hipGetLastError(); } __global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride, int ibatchstride, float* odata, int ostride, int obatchstride) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = blockIdx.z; for (int batch = z0; batch < nbatch; batch += gridDim.z) { for (int oy = y0; oy < osize.y; oy += blockDim.y * gridDim.y) { for (int ox = x0; ox < osize.x; ox += blockDim.x * gridDim.x) { int ix = int(ox / scale); int iy = int(oy / scale); odata[batch * obatchstride + oy * ostride + ox] = idata[batch * ibatchstride + iy * istride + ix]; } } } } void resizeNearest(dim3 grid, dim3 block, hipStream_t stream, int nbatch, float scale, int2 osize, float const* idata, int istride, int ibatchstride, 
float* odata, int ostride, int obatchstride) { hipLaunchKernelGGL(( resize_nearest_kernel_2d), dim3(grid), dim3(block), 0, stream, nbatch, scale, osize, idata, istride, ibatchstride, odata, ostride, obatchstride); } struct BOX { float y1, x1, y2, x2; }; struct DETECTION { float y1, x1, y2, x2, class_id, score; }; __global__ void specialslice_kernel(int samples, const void* idata, void* odata) { int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; const DETECTION* in_detections = static_cast<const DETECTION*>(idata); BOX* out_bboxes = static_cast<BOX*>(odata); for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; out_bboxes[blockOffset + cur_id].y1 = in_detections[blockOffset + cur_id].y1; out_bboxes[blockOffset + cur_id].x1 = in_detections[blockOffset + cur_id].x1; out_bboxes[blockOffset + cur_id].y2 = in_detections[blockOffset + cur_id].y2; out_bboxes[blockOffset + cur_id].x2 = in_detections[blockOffset + cur_id].x2; } } void specialSlice(hipStream_t stream, int batch_size, int boxes_cnt, const void* idata, void* odata) { int blocks = batch_size; int threads = dMIN(boxes_cnt, 2048); hipLaunchKernelGGL(( specialslice_kernel), dim3(blocks), dim3(threads), 0, stream, boxes_cnt, idata, odata); } template <typename Dtype> __global__ void concatenate(int featureCnt, int sampleCnt, const void* const* inScores, const void* const* inBBox, void* outScore, void* outBBox) { int N = blockIdx.x; int outBlockOffset = N * sampleCnt * featureCnt; int inBlockOffset = N * sampleCnt; int itemsPerThread = (sampleCnt + blockDim.x - 1) / blockDim.x; Dtype* outScorePtr = static_cast<Dtype*>(outScore); BOX* outBBoxPtr = static_cast<BOX*>(outBBox); for(int fId = 0; fId < featureCnt; fId++) { const Dtype* fInScorePtr = static_cast<const Dtype*>(inScores[fId]); const BOX* fInBBoxPtr = static_cast<const BOX*>(inBBox[fId]); int featureOffset = fId * sampleCnt; for(int i = 0; i < itemsPerThread; i++) { int curId = i * blockDim.x + threadIdx.x; if (curId < sampleCnt) { outScorePtr[outBlockOffset + featureOffset + curId] = fInScorePtr[inBlockOffset + curId]; outBBoxPtr[outBlockOffset + featureOffset + curId] = fInBBoxPtr[inBlockOffset + curId]; } } } } __global__ void resampleBBox_kernel(int orig_size, int sample_size, const void* orig_bbox_ptr, void* sampled_bbox_ptr) { const BBoxT<float>* in_bbox = static_cast<const BBoxT<float>*>(orig_bbox_ptr); BBoxT<float>* out_bbox = static_cast<BBoxT<float>*>(sampled_bbox_ptr); int N = blockIdx.x; int blockOffset_in = N * orig_size; int blockOffset_out = N * sample_size; int totalItems = (sample_size + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; if (cur_id < sample_size) { out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id]; } } } hipError_t ConcatTopK(hipStream_t stream, int N, int featureCnt, int topK, nvinfer1::DataType dtype, void* workspace, const ConcatTopKWorkSpace& spaceOffset, void** inScores, void** inBBox, void* outProposals) { //Prepare Offset int8_t* wsPtr = static_cast<int8_t*>(workspace); void* tempStoragePtr = wsPtr + spaceOffset.tempStorageOffset; void* concatedScorePtr = wsPtr + spaceOffset.concatedScoreOffset; void* concatedBBoxPtr = wsPtr + spaceOffset.concatedBBoxOffset; void* sortedScorePtr = wsPtr + spaceOffset.sortedScoreOffset; void* sortedBBoxPtr = wsPtr + spaceOffset.sortedBBoxOffset; int blocks = N; //batch_size int threads = dMIN(topK, 2048); //Concat Scores and inBBox switch 
(dtype) { case nvinfer1::DataType::kFLOAT: hipLaunchKernelGGL(( concatenate<float>), dim3(blocks), dim3(threads), 0, stream, featureCnt, topK, inScores, inBBox, concatedScorePtr, concatedBBoxPtr); CUASSERT(hipGetLastError()); break; case nvinfer1::DataType::kHALF: assert(false); default: assert(false); } //Sort and sample topK int itemCnt = topK * featureCnt; int* offsets = static_cast<int*>(tempStoragePtr); hipLaunchKernelGGL(( set_offset_kernel), dim3(1), dim3(1024), 0, stream, itemCnt, N + 1, offsets); assert(hipGetLastError() == hipSuccess); tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1)); //Sort size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) concatedScorePtr, (float*) sortedScorePtr, (BBoxT<float>*) concatedBBoxPtr, (BBoxT<float>*) sortedBBoxPtr, N * itemCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); assert((1 << 23) * N > temp_storage_bytes); hipcub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) concatedScorePtr, (float*) sortedScorePtr, (BBoxT<float>*) concatedBBoxPtr, (BBoxT<float>*) sortedBBoxPtr, N * itemCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); assert(hipGetLastError() == hipSuccess); //Sample hipLaunchKernelGGL(( resampleBBox_kernel), dim3(N), dim3(dMIN(topK, 1024)), 0, stream, itemCnt, topK, sortedBBoxPtr, outProposals); assert(hipGetLastError() == hipSuccess); return hipGetLastError(); }
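// ---------------------------------------------------------------------------
// Hedged sketch, not part of the original plugin source: the FPN level-selection
// rule applied by roiAlign_kernel / roiAlignHalfCenter_kernel above, rewritten as
// a standalone host helper for readability. `hw` is the ROI area each kernel
// compares against `firstThreshold`, and the threshold is quadrupled per level
// (the repeated `threshold *= 4` cascade). Pass numLevels = 4 for P2-P5
// (roiAlign_kernel) or 5 for P2-P6 (roiAlignHalfCenter_kernel). The function
// name and signature are illustrative assumptions, not part of the plugin API.
static inline int selectFpnLevelSketch(float hw, float firstThreshold, int numLevels)
{
    int level = 2;                     // both kernels start sampling from P2
    float threshold = firstThreshold;
    for (int step = 0; step < numLevels - 1; ++step)
    {
        if (hw > threshold)
        {
            ++level;                   // ROI is large enough for the next coarser level
        }
        threshold *= 4.f;              // same scaling the kernels apply between checks
    }
    return level;                      // 2 .. numLevels + 1
}
// ---------------------------------------------------------------------------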
5e0618a7ea902da29955d6f8cf92208ea7b385ae.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <assert.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include "maskRCNNKernels.h"
#include "plugin.h"
#include <NvInfer.h>
#include <assert.h>
#include <cub/cub.cuh>
#include <iostream>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>

#define DUBUG_KERNEL 0
#define DUBUG_BATCH 0
#define DEBUG_T 1

#define dMIN(a, b) ((a) < (b) ? (a) : (b))
#define dMAX(a, b) ((a) > (b) ? (a) : (b))
#define dCLAMP(x, xMin, xMax) ((x) > (xMin) ? ((x) < (xMax) ? (x) : (xMax)) : (xMin))

template <typename BoxType>
struct BBoxT
{
    BoxType y1, x1, y2, x2;
};

template <typename DType>
__global__ void argMaxReset_kernel(
    int samples, int NClass, const DType* in_scores, const int* maxIdx, DType* out_scores)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int max_idx = samples * NClass;
    if (idx >= max_idx)
        return;

    int sampleIdx = idx / NClass;
    int classIdx = idx % NClass;
    if (classIdx != maxIdx[sampleIdx])
        out_scores[idx] = 0;
    else
        out_scores[idx] = in_scores[idx];
}

template <typename DType>
struct ScanItem
{
    DType data;
    int idx;
};

template <typename DType>
struct GreaterItem
{
    __host__ __device__ __forceinline__ ScanItem<DType> operator()(
        const ScanItem<DType>& a, const ScanItem<DType>& b) const
    {
        return (a.data > b.data ?
a : b); } }; template <typename DType> __global__ void resetMemValue_kernel(void* outPtr, int samples, float val) { DType* out = static_cast<DType*>(outPtr); int loop = gridDim.x * blockDim.x; for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < samples; idx += loop) { out[idx] = (DType) val; } } // blockDim.x : NClass // GroupDim.x : sample count // GroupDim.y : batch N // outScore : DType[ N * sample * 1 ] // outLabel : int[ N * sample * 1 ] // outBbox : int[ N * sample * 4 ] template <typename DType, typename BoxType, int Threads = 32> __global__ void argMaxGroup_kernel(int samples, int start_class_id, int NClass, const void* inScorePtr, const void* inBboxPtr, const void* validSampleCountPtr, void* outScorePtr, void* outLabelPtr, void* outBboxPtr) { const DType* inScore = static_cast<const DType*>(inScorePtr); const BoxType* inBbox = static_cast<const BoxType*>(inBboxPtr); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); DType* outScore = static_cast<DType*>(outScorePtr); BoxType* outLabel = static_cast<BoxType*>(outLabelPtr); BoxType* outBbox = static_cast<BoxType*>(outBboxPtr); const int N = blockIdx.y; const int validSamples = validSampleCount[N]; typedef ScanItem<DType> ScanItemD; typedef cub::BlockReduce<ScanItemD, Threads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int iSample = blockIdx.x; iSample < validSamples; iSample += gridDim.x) { int classOffset = (N * samples + iSample) * NClass; // start from [batch, count, class0] // total IPerThread * blockDim ScanItemD maxItem = {0.0f, -1}; for (int i = start_class_id; i < NClass; i += Threads) { int curIdx = i + threadIdx.x; ScanItemD item = {0.0f, -1}; if (curIdx < NClass) { item.data = inScore[classOffset + curIdx]; item.idx = curIdx; } const int validNum = (NClass - i > Threads ? Threads : NClass - i); ScanItemD aggregate = BlockReduce(temp_storage).Reduce(item, GreaterItem<DType>(), validNum); __syncthreads(); if (aggregate.data > maxItem.data) { maxItem = aggregate; } #if DUBUG_KERNEL if (N == DUBUG_BATCH && threadIdx.x == 0 && iSample < 15 /*&& maxItem.idx >= 32*/) { printf("argMaxGroup N:%d, iSample:%d, maxItem(score:%.3f, idx:%d)validReduceNum:%d\n", N, iSample, (float) maxItem.data, maxItem.idx, validNum); } #endif } const int dstOffset = N * samples + iSample; if (threadIdx.x == 0) { outScore[dstOffset] = maxItem.data; outLabel[dstOffset] = (BoxType) maxItem.idx; outBbox[dstOffset * 4] = inBbox[(classOffset + maxItem.idx) * 4]; outBbox[dstOffset * 4 + 1] = inBbox[(classOffset + maxItem.idx) * 4 + 1]; outBbox[dstOffset * 4 + 2] = inBbox[(classOffset + maxItem.idx) * 4 + 2]; outBbox[dstOffset * 4 + 3] = inBbox[(classOffset + maxItem.idx) * 4 + 3]; } } } struct BlockClassSumPrefix { int total; // Constructor __device__ BlockClassSumPrefix() : total(0) { } // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. __device__ int operator()(int aggregate) { int old = total; total += aggregate; return old; } }; #define LabelShift (DType)(2.5f) #define MinValidScore (DType)(0.01f) template <typename DType> __device__ __forceinline__ DType getKey(DType score, int lable, int NClass) { return (lable < 0 ? 
(DType) 0 : ((DType)(NClass - lable - 1) * LabelShift + score + MinValidScore)); } template <typename DType, typename BoxType> __device__ __forceinline__ void getScoreLable(DType key, int NClass, DType& score, BoxType& lable) { int i = key / LabelShift; score = (key <= MinValidScore ? (DType) 0 : key - (DType) i * LabelShift - MinValidScore); score = dCLAMP(score, (DType) 0, (DType) 1.0); lable = (BoxType)(key <= MinValidScore ? -1 : (NClass - i - 1)); } // blockDim.x : threads // gridDim.x : batch N // validSampleCount INPUT : int [N] // classStartPos OUTPUT: int [N * (Class + 1)], need memset to zero before this kernel // outScore OUTPUT : DType [N * samples] // outLabel OUTPUT : int [N * samples] // outSampleIdx OUTPUT : int [N * samples] // outValidSampleCount : int [N] // IPerThread * Threads >= sample-count #define MaxClassNum 255 template <typename DType, typename BoxType, int Threads = 256, int IPerThread = 4> __global__ void sortPerClass_kernel( // int N, int samples, int NClass, int background, float scoreThreshold, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* classStartPosPtr, void* outScorePtr, void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr) { typedef cub::BlockExchange<DType, Threads, IPerThread> BlockExchangeKey; typedef cub::BlockExchange<int, Threads, IPerThread> BlockExchangeI; typedef cub::BlockRadixSort<DType, Threads, IPerThread, int> BlockRadixSort; typedef cub::BlockScan<int, Threads> BlockScanClass; __shared__ union { typename BlockExchangeKey::TempStorage storageKey; typename BlockExchangeI::TempStorage storageI; typename BlockRadixSort::TempStorage storageSort; typename BlockScanClass::TempStorage storageScan; } temp_storage; __shared__ int smemClassCount[MaxClassNum]; assert(NClass < MaxClassNum); assert(IPerThread * Threads >= samples); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); int* classStartPos = static_cast<int*>(classStartPosPtr); DType* outScore = static_cast<DType*>(outScorePtr); BoxType* outLabel = static_cast<BoxType*>(outLabelPtr); int* outSampleIdx = static_cast<int*>(outSampleIdxPtr); int* outValidSampleCount = static_cast<int*>(outValidSampleCountPtr); for (int s = threadIdx.x; s < NClass + 1; s += blockDim.x) { smemClassCount[s] = 0; } int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; DType key[IPerThread]; int iSample[IPerThread]; for (int i = 0; i < IPerThread; ++i) { iSample[i] = -1; key[i] = -1.0f; int curIdx = i * Threads + threadIdx.x; if (curIdx < validSamples) { int label = (int) (inLabel[blockOffset + curIdx]); DType score = inScore[blockOffset + curIdx]; if (label != background && label != -1 && score >= (DType) scoreThreshold) { key[i] = getKey(score, label, NClass); iSample[i] = curIdx; } } } BlockExchangeKey(temp_storage.storageKey).StripedToBlocked(key); __syncthreads(); BlockExchangeI(temp_storage.storageI).StripedToBlocked(iSample); __syncthreads(); BlockRadixSort(temp_storage.storageSort).SortDescendingBlockedToStriped(key, iSample); __syncthreads(); // store Idx cub::StoreDirectStriped<Threads>(threadIdx.x, outSampleIdx + blockOffset, iSample, validSamples); BoxType lable[IPerThread]; DType score[IPerThread]; #pragma unroll for (int i = 0; i < IPerThread; ++i) { getScoreLable(key[i], NClass, score[i], lable[i]); } 
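    // Decode each radix-sort key back into its (score, label) pair and write the
    // per-class-sorted scores and labels out in the same striped layout used for
    // outSampleIdx above.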
cub::StoreDirectStriped<Threads>(threadIdx.x, outScore + blockOffset, score, validSamples); cub::StoreDirectStriped<Threads>(threadIdx.x, outLabel + blockOffset, lable, validSamples); // final for (int i = 0; i < IPerThread; ++i) { if (lable[i] >= (BoxType) 0) { atomicAdd(&smemClassCount[(int) lable[i]], 1); } } __syncthreads(); int classBlockOffset = N * (NClass + 1); // Exclusive-sum, 1st is 0, last is final sum #if DUBUG_KERNEL if (N == DUBUG_BATCH && threadIdx.x == 0) { printf("sortPerClass(N:%d) final count of each label, valid samples:%d\n", N, validSamples); for (int k = 0; k < NClass; ++k) { if (smemClassCount[k] > 0) printf("Batch:%d, L:%d, count:%d, \n", N, k, smemClassCount[k]); } } __syncthreads(); #endif BlockClassSumPrefix sumPrefix; for (int s = 0; s < NClass; s += blockDim.x) { // s start from block int iClassSamples = 0; int iClass = s + threadIdx.x; if (iClass < NClass) { iClassSamples = smemClassCount[iClass]; } BlockScanClass(temp_storage.storageScan).ExclusiveSum(iClassSamples, iClassSamples, sumPrefix); __syncthreads(); if (iClass < NClass) { classStartPos[classBlockOffset + iClass] = iClassSamples; } } if (threadIdx.x == 0) { classStartPos[classBlockOffset + NClass] = sumPrefix.total; assert(sumPrefix.total <= validSamples); // background data removed. outValidSampleCount[N] = sumPrefix.total; #if DUBUG_KERNEL if (N == DUBUG_BATCH) printf("After sortPerClass, batch:%d valid samples total:%d\n", N, sumPrefix.total); #endif } } template <typename DType> __device__ __forceinline__ BBoxT<DType> readBbox(const BBoxT<DType>* inBbox, int idx) { BBoxT<DType> ret = ((BBoxT<DType>*) (inBbox))[idx]; return ret; } template <typename DType> __device__ __forceinline__ DType boxIoU(const BBoxT<DType>& a, const BBoxT<DType>& b) { BBoxT<DType> overlap = { dMAX(a.y1, b.y1), dMAX(a.x1, b.x1), dMIN(a.y2, b.y2), dMIN(a.x2, b.x2), }; DType oW = overlap.x2 - overlap.x1; DType oH = overlap.y2 - overlap.y1; if (oW < (DType) 0 || oH < (DType) 0) return (DType) 0; DType oA = oW * oH; return (oA / ((a.y2 - a.y1) * (a.x2 - a.x1) + (b.y2 - b.y1) * (b.x2 - b.x1) - oA)); } // PerClassNMS // gridDim.x : batch-N // blockDim.x : Threads // ItemsPerThreads : = divUp(samples, Threads) // outFlagSamples OUT: int [N * samples] template <typename DType, typename BoxType, int Threads = 256, int ItemsPerThreads = 4> __global__ void PerClassNMS_kernel( // int N, int samples, int NClass, const float nmsThreshold, const void* validSampleCountPtr, // const void *inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* classStartsPtr, void* outFlagSamplesPtr) { typedef BBoxT<BoxType> BBox; __shared__ struct { BBox refBox[MaxClassNum]; int endIdx[MaxClassNum]; int refIdx[MaxClassNum + 1]; bool markSamples[Threads * ItemsPerThreads]; int done; } smemClasses; assert(NClass + 1 < MaxClassNum); assert(samples <= Threads * ItemsPerThreads); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); // const DType *inScore = static_cast<const DType *>(inScorePtr); const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* classStarts = static_cast<const int*>(classStartsPtr); int* outFlagSamples = static_cast<int*>(outFlagSamplesPtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; if (threadIdx.x == 0) { smemClasses.done = 0; } BBox curBox[ItemsPerThreads]; int 
label[ItemsPerThreads]; #pragma unroll for (int ite = 0; ite * blockDim.x < validSamples; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples) { label[ite] = (int) inLabel[blockOffset + curIdx]; curBox[ite] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + curIdx]); } else { label[ite] = -1; } smemClasses.markSamples[curIdx] = (label[ite] < 0 ? false : true); } int classBlockOffset = N * (NClass + 1); for (int i = threadIdx.x; i < NClass + 1; i += blockDim.x) { int refIdx = classStarts[classBlockOffset + i]; smemClasses.refIdx[i] = refIdx; smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]); } __syncthreads(); for (int i = threadIdx.x; i < NClass; i += blockDim.x) { int endIdx = smemClasses.refIdx[i + 1]; smemClasses.endIdx[i] = endIdx; if (endIdx == smemClasses.refIdx[i]) { atomicAdd(&smemClasses.done, 1); } } __syncthreads(); #if DUBUG_KERNEL // print info if (N == DUBUG_BATCH && threadIdx.x == 0) { printf("batch:%d, before starting NMS, done count:%d\n", N, smemClasses.done); printf("batch:%d, Total num:%d, startPos:\n", N, validSamples); for (int k = 0; k < NClass; ++k) { if (smemClasses.refIdx[k] != smemClasses.endIdx[k]) { printf("Batch:%d, label:%d [%d : %d], check ref-label:%d\n", N, k, smemClasses.refIdx[k], smemClasses.endIdx[k], (int) inLabel[blockOffset + smemClasses.refIdx[k]]); } } printf("\n"); } __syncthreads(); #endif // class done to check stop point while (smemClasses.done < NClass) { for (int ite = 0; ite * blockDim.x < validSamples; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; int refIdx = -1; int endIdx = -1; if (curIdx < validSamples && smemClasses.markSamples[curIdx]) { if (label[ite] >= 0) { refIdx = smemClasses.refIdx[label[ite]]; endIdx = smemClasses.endIdx[label[ite]]; if (curIdx > refIdx && curIdx < endIdx) { BBox refBox = smemClasses.refBox[label[ite]]; if (boxIoU(refBox, curBox[ite]) > (DType) nmsThreshold) { smemClasses.markSamples[curIdx] = false; } } } } } __syncthreads(); // push refIdx/refBox forward to next mark // only the refIdx thread to push itself. other threads idle for (int i = threadIdx.x; i < NClass; i += blockDim.x) { int refIdx = smemClasses.refIdx[i]; int endIdx = smemClasses.endIdx[i]; if (refIdx < endIdx) { do { ++refIdx; } while (refIdx < endIdx && smemClasses.markSamples[refIdx] == false); smemClasses.refIdx[i] = refIdx; if (refIdx < endIdx) { smemClasses.refBox[i] = readBbox(inBbox, blockOffset + inBboxRefIdx[blockOffset + refIdx]); } else { atomicAdd(&smemClasses.done, 1); } } } __syncthreads(); } // no need to write all data out for (int segment = 0; segment < validSamples; segment += blockDim.x) { int curIdx = segment + threadIdx.x; if (curIdx < validSamples) { outFlagSamples[blockOffset + curIdx] = (smemClasses.markSamples[curIdx] ? 
1 : 0); } } } // TopKGather // gridDim.x : batch-N // blockDim.x : Threads // ItemsPerThreads : = divUp(samples, Threads) // outDetectionCount : int [N], must be set 0 before kernel #define MaxItemsPerThreads 8 template <typename DType, typename BoxType, int Threads = 256> __global__ void TopKGatherProposal_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outBboxPtr) { typedef BBoxT<BoxType> BBox; typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1; typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2; typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3; typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4; typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5; typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6; typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7; typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8; __shared__ union { typename BlockRadixSort8::TempStorage sort8; typename BlockRadixSort7::TempStorage sort7; typename BlockRadixSort6::TempStorage sort6; typename BlockRadixSort5::TempStorage sort5; typename BlockRadixSort4::TempStorage sort4; typename BlockRadixSort3::TempStorage sort3; typename BlockRadixSort2::TempStorage sort2; typename BlockRadixSort1::TempStorage sort1; } temp_storage; assert(MaxItemsPerThreads * Threads >= samples); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr); BBox* outBbox = static_cast<BBox*>(outBboxPtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; int finalTopK = dMIN(keepTopK, validSamples); int idx[MaxItemsPerThreads]; DType score[MaxItemsPerThreads]; int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x; for (int ite = 0; ite < totalItems; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx]) { idx[ite] = curIdx; score[ite] = inScore[blockOffset + curIdx]; } else { idx[ite] = -1; score[ite] = 0.0f; } } switch (totalItems) { case 0: break; case 1: BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx); break; case 2: BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx); break; case 3: BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx); break; case 4: BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx); break; case 5: BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx); break; case 6: BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx); break; case 7: BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx); break; case 8: BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx); break; default: assert(false); } __syncthreads(); int outBlockOffset = N * keepTopK; int 
topkItems = (keepTopK + (Threads - 1)) / Threads; for (int i = 0; i < topkItems; ++i) { int curI = i * blockDim.x + threadIdx.x; if (curI < keepTopK) { BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f}; if (curI < finalTopK && idx[i] >= 0 && score[i] > MinValidScore) { oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]]; } ((BBox*) outBbox)[outBlockOffset + curI] = oB; } } } #define MaxItemsPerThreads 8 template <typename DType, typename BoxType, int Threads = 256> __global__ void TopKGather_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outDetectionPtr) { typedef BBoxT<BoxType> BBox; typedef cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1; typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2; typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3; typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4; typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5; typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6; typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7; typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8; __shared__ union { typename BlockRadixSort8::TempStorage sort8; typename BlockRadixSort7::TempStorage sort7; typename BlockRadixSort6::TempStorage sort6; typename BlockRadixSort5::TempStorage sort5; typename BlockRadixSort4::TempStorage sort4; typename BlockRadixSort3::TempStorage sort3; typename BlockRadixSort2::TempStorage sort2; typename BlockRadixSort1::TempStorage sort1; } temp_storage; assert(MaxItemsPerThreads * Threads >= samples); const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BoxType* inLabel = static_cast<const BoxType*>(inLabelPtr); // InLabel keeps INT32 const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr); DType* outDetections = static_cast<DType*>(outDetectionPtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; int finalTopK = dMIN(keepTopK, validSamples); int idx[MaxItemsPerThreads]; DType score[MaxItemsPerThreads]; int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x; for (int ite = 0; ite < totalItems; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx]) { idx[ite] = curIdx; score[ite] = inScore[blockOffset + curIdx]; } else { idx[ite] = -1; score[ite] = 0.0f; } } switch (totalItems) { case 0: break; case 1: BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx); break; case 2: BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx); break; case 3: BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx); break; case 4: BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx); break; case 5: BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx); break; case 6: BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) 
idx); break; case 7: BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx); break; case 8: BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx); break; default: assert(false); } __syncthreads(); int outBlockOffset = N * keepTopK; int topkItems = (keepTopK + (Threads - 1)) / Threads; for (int i = 0; i < topkItems; ++i) { int curI = i * blockDim.x + threadIdx.x; if (curI < keepTopK) { BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f}; DType oS = 0.0f; BoxType oL = -1; if (curI < finalTopK && idx[i] >= 0 && score[i] > MinValidScore) { oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]]; oS = score[i]; oL = (BoxType) inLabel[blockOffset + idx[i]]; } outDetections[(outBlockOffset + curI) * 6] = oB.y1; outDetections[(outBlockOffset + curI) * 6 + 1] = oB.x1; outDetections[(outBlockOffset + curI) * 6 + 2] = oB.y2; outDetections[(outBlockOffset + curI) * 6 + 3] = oB.x2; outDetections[(outBlockOffset + curI) * 6 + 4] = oL; outDetections[(outBlockOffset + curI) * 6 + 5] = oS; } } } RefineDetectionWorkSpace::RefineDetectionWorkSpace( const int batchSize, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType) : argMaxScoreDims(sampleCount, 1) , argMaxBboxDims(sampleCount, 4) , argMaxLabelDims(sampleCount, 1) , sortClassScoreDims(sampleCount, 1) , sortClassLabelDims(sampleCount, 1) , sortClassSampleIdxDims(sampleCount + 1, 1) , sortClassPosDims(param.numClasses + 1, 1) , sortNMSMarkDims(sampleCount, 1) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // arMaxScore : [N, samples] : m_Type argMaxScoreOffset = sumSize; sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize); argMaxBboxOffset = sumSize; // argMaxBbox : [N, samples, 4] : m_Type sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize); argMaxLabelOffset = sumSize; // argMaxLabel : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassScoreOffset = sumSize; // sortClassScore : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize); sortClassLabelOffset = sumSize; // sortClassLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize); sortClassSampleIdxOffset = sumSize; // sortClassSampleIdx : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassValidCountOffset = sumSize; // sortClassValidCount : [N, 1] : kINT32 sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassPosOffset = sumSize; // sortClassPos : [N, numClasses+1] : kINT32 sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortNMSMarkOffset = sumSize; // sortNMSMark : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); totalSize = sumSize; } ProposalWorkSpace::ProposalWorkSpace(const int batchSize, const int inputCnt, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType) : preRefineScoreDims(inputCnt, 1) , preRefineSortedScoreDims(inputCnt, 1) , preRefineBboxDims(inputCnt, 4) , argMaxScoreDims(sampleCount, 1) , argMaxBboxDims(sampleCount, 4) , 
argMaxLabelDims(sampleCount, 1) , sortClassScoreDims(sampleCount, 1) , sortClassLabelDims(sampleCount, 1) , sortClassSampleIdxDims(sampleCount, 1) , sortClassPosDims(param.numClasses + 1, 1) , sortNMSMarkDims(sampleCount, 1) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // temp storage size for sorting scores tempStorageOffset = sumSize; sumSize += (1 << 23) * batchSize; // preRefineScore : [N, inputcnt, 1] // extracted foreground score from inputs[0] preRefineScoreOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineScoreDims) * typeSize(type) * batchSize); // preRefineSortedScore: [N, inputcnt, 1] preRefineSortedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize); // preRefineBbox: [N, inputcnt, 4] // sorted bbox preRefineBboxOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize); // arMaxScore : [N, samples] : m_Type argMaxScoreOffset = sumSize; sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize); argMaxBboxOffset = sumSize; // argMaxBbox : [N, samples, 4] : m_Type sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize); argMaxLabelOffset = sumSize; // argMaxLabel : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassScoreOffset = sumSize; // sortClassScore : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize); sortClassLabelOffset = sumSize; // sortClassLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize); sortClassSampleIdxOffset = sumSize; // sortClassSampleIdx : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassValidCountOffset = sumSize; // sortClassValidCount : [N, 1] : kINT32 sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassPosOffset = sumSize; // sortClassPos : [N, numClasses+1] : kINT32 sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortNMSMarkOffset = sumSize; // sortNMSMark : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); totalSize = sumSize; } MultilevelProposeROIWorkSpace::MultilevelProposeROIWorkSpace(const int batchSize, const int inputCnt, const int sampleCount, const RefineNMSParameters& param, const nvinfer1::DataType inType) : preRefineSortedScoreDims(inputCnt, 1) , preRefineBboxDims(inputCnt, 4) , argMaxScoreDims(sampleCount, 1) , argMaxBboxDims(sampleCount, 4) , argMaxLabelDims(sampleCount, 1) , sortClassScoreDims(sampleCount, 1) , sortClassLabelDims(sampleCount, 1) , sortClassSampleIdxDims(sampleCount+1, 1) , sortClassPosDims(param.numClasses + 1, 1) , sortNMSMarkDims(sampleCount, 1) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // temp storage size for sorting scores tempStorageOffset = sumSize; sumSize += (1 << 23) * batchSize; // preRefineSortedScore: [N, inputcnt, 1] preRefineSortedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineSortedScoreDims) * typeSize(type) * batchSize); // preRefineBbox: [N, inputcnt, 4] // sorted bbox preRefineBboxOffset = sumSize; sumSize += AlignMem(dimVolume(preRefineBboxDims) * typeSize(type) * batchSize); // 
argMaxScore : [N, samples] : m_Type argMaxScoreOffset = sumSize; sumSize += AlignMem(dimVolume(argMaxScoreDims) * typeSize(type) * batchSize); argMaxBboxOffset = sumSize; // argMaxBbox : [N, samples, 4] : m_Type sumSize += AlignMem(dimVolume(argMaxBboxDims) * typeSize(type) * batchSize); argMaxLabelOffset = sumSize; // argMaxLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(argMaxLabelDims) * typeSize(type) * batchSize); sortClassScoreOffset = sumSize; // sortClassScore : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassScoreDims) * typeSize(type) * batchSize); sortClassLabelOffset = sumSize; // sortClassLabel : [N, samples] : m_Type sumSize += AlignMem(dimVolume(sortClassLabelDims) * typeSize(type) * batchSize); sortClassSampleIdxOffset = sumSize; // sortClassSampleIdx : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortClassSampleIdxDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassValidCountOffset = sumSize; // sortClassValidCount : [N, 1] : kINT32 sumSize += AlignMem(dimVolume(sortClassValidCountDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortClassPosOffset = sumSize; // sortClassPos : [N, numClasses+1] : kINT32 sumSize += AlignMem(dimVolume(sortClassPosDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); sortNMSMarkOffset = sumSize; // sortNMSMark : [N, samples] : kINT32 sumSize += AlignMem(dimVolume(sortNMSMarkDims) * typeSize(nvinfer1::DataType::kINT32) * batchSize); totalSize = sumSize; } ConcatTopKWorkSpace::ConcatTopKWorkSpace(const int batchSize, const int concatCnt, const int topK,const nvinfer1::DataType inType) : concatedScoreDims(concatCnt*topK, 1) , concatedBBoxDims(concatCnt*topK, 4) , sortedScoreDims(concatCnt*topK, 1) , sortedBBoxDims(concatCnt*topK, 4) { size_t sumSize = 0; const nvinfer1::DataType type = nvinfer1::DataType::kFLOAT; // resource // temp storage size for sorting scores tempStorageOffset = sumSize; sumSize += (1 << 23) * batchSize; // concatedScoreOffset: [N, concatCnt*topK, 1] concatedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(concatedScoreDims) * typeSize(type) * batchSize); // concatedBBoxOffset: [N, concatCnt*topK, 4] concatedBBoxOffset = sumSize; sumSize += AlignMem(dimVolume(concatedBBoxDims) * typeSize(type) * batchSize); // sortedScoreOffset: [N, concatCnt * topK, 1] sortedScoreOffset = sumSize; sumSize += AlignMem(dimVolume(sortedScoreDims) * typeSize(type) * batchSize); // sortedBBoxOffset: [N, concatCnt * topK, 4] sortedBBoxOffset = sumSize; sumSize += AlignMem(dimVolume(sortedBBoxDims) * typeSize(type) * batchSize); totalSize = sumSize; } template <int Threads> cudaError_t argMaxGroup(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox) { int maxGridX = dMIN(samples, 512 / N); dim3 gridDim = {(unsigned int) nAlignDown(maxGridX, 32), (unsigned int) N, 1}; dim3 threads = {Threads, 1, 1}; switch (dtype) { case nvinfer1::DataType::kFLOAT: argMaxGroup_kernel<float, float, Threads><<<gridDim, threads, 0, stream>>>( samples, 0, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return cudaGetLastError(); } template <int Threads> cudaError_t argMaxWOBackground(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, const void* inScore, const void* inBbox, const void* validSamples, void* outScore, void* outLabel, void* outBbox) { 
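    // Same launch configuration as argMaxGroup() above, but start_class_id is 1 so the
    // per-sample argmax in argMaxGroup_kernel skips the background class (class 0).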
int maxGridX = dMIN(samples, 512 / N); dim3 gridDim = {(unsigned int) nAlignDown(maxGridX, 32), (unsigned int) N, 1}; dim3 threads = {Threads, 1, 1}; switch (dtype) { case nvinfer1::DataType::kFLOAT: argMaxGroup_kernel<float, float, Threads><<<gridDim, threads, 0, stream>>>( samples, 1, NClass, inScore, inBbox, validSamples, outScore, outLabel, outBbox); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return cudaGetLastError(); } template <int Threads, int ItermPerThreads> cudaError_t sortPerClass(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, int background, float scoreThreshold, const void* inSampleValidCount, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, void* outclassStartPosPtr, void* outScorePtr, void* outLabelPtr, void* outSampleIdxPtr, void* outValidSampleCountPtr) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: sortPerClass_kernel<float, float, Threads, ItermPerThreads><<<blocks, threads, 0, stream>>>(samples, NClass, background, scoreThreshold, inSampleValidCount, inScorePtr, inLabelPtr, inBboxPtr, outclassStartPosPtr, outScorePtr, outLabelPtr, outSampleIdxPtr, outValidSampleCountPtr); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return cudaGetLastError(); }; template <int Threads> cudaError_t PerClassNMS(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int NClass, const float nmsThreshold, const void* validSampleCount, // const void *inScore, const void* inLabel, const void* inBbox, const void* inBboxRefIdx, const void* classStarts, void* outFlagSamples) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: PerClassNMS_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, NClass, nmsThreshold, validSampleCount, inLabel, inBbox, inBboxRefIdx, classStarts, outFlagSamples); break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return cudaGetLastError(); } template <int Threads> cudaError_t KeepTopKGather(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outDetections, int proposal) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: if (proposal) { TopKGatherProposal_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outDetections); } else { TopKGather_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outDetections); } break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return cudaGetLastError(); } // TopKGather For TLT RPN Proposal // gridDim.x : batch-N // blockDim.x : Threads // ItemsPerThreads : = divUp(samples, Threads) // outDetectionCount : int [N], must be set 0 before kernel #define MaxItemsPerThreads 8 template <typename DType, typename BoxType, int Threads = 256> __global__ void TopKGatherBoxScore_kernel(int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outScorePtr, void* outBboxPtr) { typedef 
cub::BlockRadixSort<DType, Threads, 1, int> BlockRadixSort1; typedef cub::BlockRadixSort<DType, Threads, 2, int> BlockRadixSort2; typedef cub::BlockRadixSort<DType, Threads, 3, int> BlockRadixSort3; typedef cub::BlockRadixSort<DType, Threads, 4, int> BlockRadixSort4; typedef cub::BlockRadixSort<DType, Threads, 5, int> BlockRadixSort5; typedef cub::BlockRadixSort<DType, Threads, 6, int> BlockRadixSort6; typedef cub::BlockRadixSort<DType, Threads, 7, int> BlockRadixSort7; typedef cub::BlockRadixSort<DType, Threads, 8, int> BlockRadixSort8; __shared__ union { typename BlockRadixSort8::TempStorage sort8; typename BlockRadixSort7::TempStorage sort7; typename BlockRadixSort6::TempStorage sort6; typename BlockRadixSort5::TempStorage sort5; typename BlockRadixSort4::TempStorage sort4; typename BlockRadixSort3::TempStorage sort3; typename BlockRadixSort2::TempStorage sort2; typename BlockRadixSort1::TempStorage sort1; } temp_storage; assert(MaxItemsPerThreads * Threads >= samples); typedef BBoxT<BoxType> BBox; const int* validSampleCount = static_cast<const int*>(validSampleCountPtr); const DType* inScore = static_cast<const DType*>(inScorePtr); const BBox* inBbox = static_cast<const BBox*>(inBboxPtr); const int* inBboxRefIdx = static_cast<const int*>(inBboxRefIdxPtr); const int* inFlagSamples = static_cast<const int*>(inFlagSamplesPtr); BBox* outBbox = static_cast<BBox*>(outBboxPtr); DType* outScore = static_cast<DType*>(outScorePtr); int N = blockIdx.x; int blockOffset = N * samples; int validSamples = validSampleCount[N]; int finalTopK = dMIN(keepTopK, validSamples); int idx[MaxItemsPerThreads]; DType score[MaxItemsPerThreads]; int totalItems = (validSamples + (blockDim.x - 1)) / blockDim.x; for (int ite = 0; ite < totalItems; ++ite) { int curIdx = ite * blockDim.x + threadIdx.x; if (curIdx < validSamples && inFlagSamples[blockOffset + curIdx]) { idx[ite] = curIdx; score[ite] = inScore[blockOffset + curIdx]; } else { idx[ite] = -1; score[ite] = 0.0f; } } switch (totalItems) { case 0: break; case 1: BlockRadixSort1(temp_storage.sort1).SortDescendingBlockedToStriped((DType(&)[1]) score, (int(&)[1]) idx); break; case 2: BlockRadixSort2(temp_storage.sort2).SortDescendingBlockedToStriped((DType(&)[2]) score, (int(&)[2]) idx); break; case 3: BlockRadixSort3(temp_storage.sort3).SortDescendingBlockedToStriped((DType(&)[3]) score, (int(&)[3]) idx); break; case 4: BlockRadixSort4(temp_storage.sort4).SortDescendingBlockedToStriped((DType(&)[4]) score, (int(&)[4]) idx); break; case 5: BlockRadixSort5(temp_storage.sort5).SortDescendingBlockedToStriped((DType(&)[5]) score, (int(&)[5]) idx); break; case 6: BlockRadixSort6(temp_storage.sort6).SortDescendingBlockedToStriped((DType(&)[6]) score, (int(&)[6]) idx); break; case 7: BlockRadixSort7(temp_storage.sort7).SortDescendingBlockedToStriped((DType(&)[7]) score, (int(&)[7]) idx); break; case 8: BlockRadixSort8(temp_storage.sort8).SortDescendingBlockedToStriped((DType(&)[8]) score, (int(&)[8]) idx); break; default: assert(false); } __syncthreads(); int outBlockOffset = N * keepTopK; int topkItems = (keepTopK + (Threads - 1)) / Threads; for (int i = 0; i < topkItems; ++i) { int curI = i * blockDim.x + threadIdx.x; if (curI < keepTopK) { BBox oB = {(BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f, (BoxType) 0.0f}; DType oS = 0.0f; if (curI < finalTopK && idx[i] >= 0) { oB = ((BBox*) inBbox)[blockOffset + inBboxRefIdx[blockOffset + idx[i]]]; oS = score[i]; } ((BBox*) outBbox)[outBlockOffset + curI] = oB; outScore[outBlockOffset + curI] = oS; } } } template <int 
Threads> cudaError_t KeepTopKGatherBoxScore(cudaStream_t stream, int N, nvinfer1::DataType dtype, int samples, int keepTopK, const void* validSampleCountPtr, const void* inScorePtr, const void* inLabelPtr, const void* inBboxPtr, const void* inBboxRefIdxPtr, const void* inFlagSamplesPtr, void* outScores, void* outDetections, int proposal) { int blocks = N; int threads = Threads; switch (dtype) { case nvinfer1::DataType::kFLOAT: if (proposal) { TopKGatherBoxScore_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outScores, outDetections); } else { TopKGather_kernel<float, float, Threads><<<blocks, threads, 0, stream>>>(samples, keepTopK, validSampleCountPtr, inScorePtr, inLabelPtr, inBboxPtr, inBboxRefIdxPtr, inFlagSamplesPtr, outDetections); } break; case nvinfer1::DataType::kHALF: break; default: assert(false); } return cudaGetLastError(); } cudaError_t RefineBatchClassNMS(cudaStream_t stream, int N, int samples, nvinfer1::DataType dtype, const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections) { int NClass = param.numClasses; int8_t* wsPtr = static_cast<int8_t*>(workspace); void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset; cudaError_t status = cudaSuccess; CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); if (NClass > 1) { // multiple classes status = argMaxGroup<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes assert(status == cudaSuccess); CUASSERT(status); } else { // Only one class argMaxScorePtr = const_cast<void*>(inScores); argMaxBBoxPtr = const_cast<void*>(inDelta); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } status = ApplyDelta2Bboxes(stream, N, samples, inROI, argMaxBBoxPtr, argMaxBBoxPtr); assert(status == cudaSuccess); if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = 
sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return cudaErrorLaunchFailure; } assert(status == cudaSuccess); CUASSERT(status); status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); assert(status == cudaSuccess); CUASSERT(status); status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0); assert(status == cudaSuccess); CUASSERT(status); return status; } cudaError_t DetectionPostProcess(cudaStream_t stream, int N, int samples, const float* regWeight, const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param, const RefineDetectionWorkSpace& refineOffset, void* workspace, const void* inScores, const void* inDelta, const void* inCountValid, const void* inROI, void* outDetections) { int NClass = param.numClasses; int8_t* wsPtr = static_cast<int8_t*>(workspace); void* argMaxScorePtr = wsPtr + refineOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + refineOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + refineOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + refineOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + refineOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + refineOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + refineOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + refineOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + refineOffset.sortNMSMarkOffset; cudaError_t status = cudaSuccess; CUASSERT(cudaMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(float), stream)); CUASSERT(cudaMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4* sizeof(float), stream)); CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); CUASSERT(cudaMemsetAsync(sortClassPosPtr, 0, N * (NClass+1) * sizeof(int), stream)); CUASSERT(cudaMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream)); if (NClass > 1) { // multiple classes status = argMaxWOBackground<32>(stream, N, dtype, samples, NClass, inScores, inDelta, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr); // argMaxBBoxPtr means delta of bboxes assert(status == cudaSuccess); CUASSERT(status); } else { // Only one class argMaxScorePtr = const_cast<void*>(inScores); argMaxBBoxPtr = const_cast<void*>(inDelta); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } status = DecodeBBoxes(stream, N, samples, regWeight, inputHeight, inputWidth, inROI, argMaxBBoxPtr, argMaxBBoxPtr); assert(status == cudaSuccess); if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, 
sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return cudaErrorLaunchFailure; } assert(status == cudaSuccess); CUASSERT(status); status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); CUASSERT(status); status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outDetections, 0); CUASSERT(status); return status; } struct BF_SCORE { float bg, fg; }; // in_scores : [N, samples, 2] // output_score : [N, samples, 1] __global__ void extract_fg_kernel(int samples, const void* in_scores, void* output_score) { const BF_SCORE* in = static_cast<const BF_SCORE*>(in_scores); float* out = static_cast<float*>(output_score); int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; out[blockOffset + cur_id] = in[blockOffset + cur_id].fg; } } __global__ void set_offset_kernel(int stride, int size, int* output) { // One block, because batch size shouldn't be too large. 
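    // output[i] = i * stride yields the row offsets [0, stride, 2 * stride, ...] that the
    // callers below pass as the begin/end segment offsets of
    // cub::DeviceSegmentedRadixSort::SortPairsDescending (one segment per batch element).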
for (int i = threadIdx.x; i < size; i += blockDim.x) { output[i] = i * stride; } } __global__ void resample_kernel(int orig_size, int sample_size, const void* orig_score_ptr, const void* orig_bbox_ptr, void* sampled_score_ptr, void* sampled_bbox_ptr) { const float* in_score = static_cast<const float*>(orig_score_ptr); const BBoxT<float>* in_bbox = static_cast<const BBoxT<float>*>(orig_bbox_ptr); float* out_score = static_cast<float*>(sampled_score_ptr); BBoxT<float>* out_bbox = static_cast<BBoxT<float>*>(sampled_bbox_ptr); int N = blockIdx.x; int blockOffset_in = N * orig_size; int blockOffset_out = N * sample_size; int realSampleCnt = dMIN(sample_size, orig_size); int totalItems = (realSampleCnt + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; if (cur_id < realSampleCnt) { out_score[blockOffset_out + cur_id] = in_score[blockOffset_in + cur_id]; out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id]; } } } cudaError_t proposalRefineBatchClassNMS(cudaStream_t stream, int N, int inputCnt, int samples, nvinfer1::DataType dtype, const RefineNMSParameters& param, const ProposalWorkSpace& proposalOffset, void* workspace, const void* inScores, //[N, inputcnt, 2] const void* inDelta, //[N, inputcnt, 4] const void* inCountValid, const void* inAnchors, //[N, inputcnt, 4] void* outProposals) { int8_t* wsPtr = static_cast<int8_t*>(workspace); void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset; void* preRefineScorePtr = wsPtr + proposalOffset.preRefineScoreOffset; void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset; void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset; void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + proposalOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset; cudaError_t status = cudaSuccess; CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); // extract foreground score extract_fg_kernel<<<N, dMIN(inputCnt, 1024), 0, stream>>>(inputCnt, inScores, preRefineScorePtr); CUASSERT(cudaGetLastError()); // Here, inDelta are converted to normalize coordinates based on anchors status = ApplyDelta2Bboxes(stream, N, inputCnt, inAnchors, inDelta, const_cast<void*>(inDelta)); CUASSERT(status); // sort the score // d_key_in: preRefineScorePtr [N, inputCnt, 1] // d_key_out: preRefineSortedScorePtr // d_values_in: inDelta [N, inputCnt, 4] // d_values_out: preRefineBboxPtr // num_items: inputCnt*N // num_segments: N // offsets: [0, inputCnt, inputCnt*2, ..., ] int* offsets = static_cast<int*>(tempStoragePtr); set_offset_kernel<<<1, 1024, 0, stream>>>(inputCnt, N + 1, offsets); assert(cudaGetLastError() == cudaSuccess); tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1)); size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) preRefineScorePtr, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * 
inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); assert((1 << 23) * (size_t)N > temp_storage_bytes); cub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) preRefineScorePtr, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); int NClass = param.numClasses; assert(NClass == 1); if (NClass == 1) { // Only one class resample_kernel<<<N, dMIN(samples, 1024), 0, stream>>>( inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return cudaErrorLaunchFailure; } CUASSERT(status); status = PerClassNMS<256>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); CUASSERT(status); status = KeepTopKGather<256>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outProposals, 1); CUASSERT(status); return status; } cudaError_t MultilevelPropose(cudaStream_t stream, int N, int inputCnt, int samples, const float* regWeight, const float inputHeight, const float inputWidth, nvinfer1::DataType dtype, const RefineNMSParameters& param, const MultilevelProposeROIWorkSpace& proposalOffset, void* workspace, const void* inScore, //[N, inputcnt, 1] const void* inDelta, //[N, inputcnt, 4] void* inCountValid, const void* inAnchors, //[N, inputcnt, 4] void* outScore, void* outBbox) { int8_t* wsPtr = static_cast<int8_t*>(workspace); void* tempStoragePtr = wsPtr + proposalOffset.tempStorageOffset; void* preRefineSortedScorePtr = wsPtr + proposalOffset.preRefineSortedScoreOffset; void* preRefineBboxPtr = wsPtr + proposalOffset.preRefineBboxOffset; void* argMaxScorePtr = wsPtr + proposalOffset.argMaxScoreOffset; void* argMaxLabelPtr = wsPtr + proposalOffset.argMaxLabelOffset; void* argMaxBBoxPtr = wsPtr + proposalOffset.argMaxBboxOffset; void* sortClassScorePtr = wsPtr + proposalOffset.sortClassScoreOffset; void* sortClassLabelPtr = wsPtr + proposalOffset.sortClassLabelOffset; void* sortClassSampleIdxPtr = wsPtr + 
proposalOffset.sortClassSampleIdxOffset; void* sortClassValidCountPtr = wsPtr + proposalOffset.sortClassValidCountOffset; void* sortClassPosPtr = wsPtr + proposalOffset.sortClassPosOffset; void* sortNMSMarkPtr = wsPtr + proposalOffset.sortNMSMarkOffset; cudaError_t status = cudaSuccess; int NClass = param.numClasses; assert(NClass == 1); CUASSERT(cudaMemsetAsync(argMaxScorePtr, 0, N * samples * sizeof(float), stream)); CUASSERT(cudaMemsetAsync(argMaxBBoxPtr, 0, N * samples * 4* sizeof(float), stream)); CUASSERT(cudaMemsetAsync(sortClassValidCountPtr, 0, N * sizeof(int), stream)); CUASSERT(cudaMemsetAsync(sortClassPosPtr, 0, N * (NClass+1) * sizeof(int), stream)); CUASSERT(cudaMemsetAsync(sortClassSampleIdxPtr, 0, N * (samples + 1) * sizeof(int), stream)); CUASSERT(cudaGetLastError()); // Here, inDelta are converted to normalize coordinates based on anchors status = DecodeBBoxes(stream, N, inputCnt, regWeight, inputHeight, inputWidth, inAnchors, inDelta, const_cast<void*>(inDelta)); CUASSERT(cudaGetLastError()); // sort the score // d_key_in: preRefineScorePtr [N, inputCnt, 1] // d_key_out: preRefineSortedScorePtr // d_values_in: inDelta [N, inputCnt, 4] // d_values_out: preRefineBboxPtr // num_items: inputCnt*N // num_segments: N // offsets: [0, inputCnt, inputCnt*2, ..., ] int* offsets = static_cast<int*>(tempStoragePtr); set_offset_kernel<<<1, 1024, 0, stream>>>(inputCnt, N + 1, offsets); CUASSERT(cudaGetLastError()); tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1)); size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) inScore, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); CUASSERT(cudaGetLastError()); assert((1 << 23) * N > temp_storage_bytes); cub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) inScore, (float*) preRefineSortedScorePtr, (BBoxT<float>*) inDelta, (BBoxT<float>*) preRefineBboxPtr, N * inputCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); CUASSERT(cudaGetLastError()); if (NClass == 1) { // Only one class resample_kernel<<<N, dMIN(samples, 1024), 0, stream>>>( inputCnt, samples, preRefineSortedScorePtr, preRefineBboxPtr, argMaxScorePtr, argMaxBBoxPtr); CUASSERT(cudaGetLastError()); int threads = 512; int blocks = (N * samples + threads - 1) / threads; blocks = dMIN(blocks, 8); switch (dtype) { case nvinfer1::DataType::kFLOAT: { resetMemValue_kernel<float><<<blocks, threads, 0, stream>>>(argMaxLabelPtr, N * samples, 0); CUASSERT(cudaGetLastError()); break; } case nvinfer1::DataType::kHALF: { break; } default: assert(false); } } if (samples <= 1024) { status = sortPerClass<256, 4>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 2048) { status = sortPerClass<256, 8>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else if (samples <= 4096) { status = sortPerClass<256, 16>(stream, N, dtype, samples, NClass, param.backgroundLabelId, param.scoreThreshold, inCountValid, argMaxScorePtr, argMaxLabelPtr, 
argMaxBBoxPtr, sortClassPosPtr, sortClassScorePtr, sortClassLabelPtr, sortClassSampleIdxPtr, sortClassValidCountPtr); } else { assert(false && "unsupported sortPerClass"); return cudaErrorLaunchFailure; } CUASSERT(cudaGetLastError()); status = PerClassNMS<1024>(stream, N, dtype, samples, NClass, param.iouThreshold, sortClassValidCountPtr, // sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortClassPosPtr, sortNMSMarkPtr); CUASSERT(cudaGetLastError()); status = KeepTopKGatherBoxScore<1024>(stream, N, dtype, samples, param.keepTopK, sortClassValidCountPtr, sortClassScorePtr, sortClassLabelPtr, argMaxBBoxPtr, sortClassSampleIdxPtr, sortNMSMarkPtr, outScore, outBbox, 1); CUASSERT(cudaGetLastError()); return status; } struct BBOX { float y1, x1, y2, x2; }; struct DELTA { float dy, dx, logdh, logdw; }; __global__ void decode_bboxes_kernel(int samples, const void* anchors, const void* delta, const float* regWeight, const float inputHeight, const float inputWidth, void* outputBbox, float bboxClipThresh) { const BBOX* anchors_in = static_cast<const BBOX*>(anchors); const DELTA* delta_in = static_cast<const DELTA*>(delta); BBOX* bbox_out = static_cast<BBOX*>(outputBbox); int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; if (cur_id < samples) { BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id]; // convert yxyx -> cyxhw // cy, cx, h, w /*BBOX cur_anchor_cyxhw;*/ float cur_anchor_h = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1 + 1.0); float cur_anchor_w = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1 + 1.0); //w float cur_anchor_yc = cur_anchor_yxyx.y1 + cur_anchor_h * 0.5; //cy float cur_anchor_xc = cur_anchor_yxyx.x1 + cur_anchor_w * 0.5; //cx DELTA cur_delta = delta_in[blockOffset + cur_id]; // divided by regWeight cur_delta.dy /= regWeight[0]; cur_delta.dx /= regWeight[1]; cur_delta.logdh /= regWeight[2]; cur_delta.logdw /= regWeight[3]; cur_delta.logdh = dMIN(cur_delta.logdh, bboxClipThresh); cur_delta.logdw = dMIN(cur_delta.logdw, bboxClipThresh); // apply delta float decoded_box_yc = cur_anchor_yc + cur_delta.dy * cur_anchor_h; float decoded_box_xc = cur_anchor_xc + cur_delta.dx * cur_anchor_w; float decoded_box_h = expf(cur_delta.logdh) * cur_anchor_h; float decoded_box_w = expf(cur_delta.logdw) * cur_anchor_w; float decoded_box_ymin = decoded_box_yc - 0.5 * decoded_box_h; float decoded_box_xmin = decoded_box_xc - 0.5 * decoded_box_w; float decoded_box_ymax = decoded_box_ymin + decoded_box_h - 1.0; float decoded_box_xmax = decoded_box_xmin + decoded_box_w - 1.0; // clip bbox: a more precision clip method based on real window could be implemented decoded_box_ymin = dMAX(dMIN(decoded_box_ymin, inputHeight - 1.0), 0.0); decoded_box_xmin = dMAX(dMIN(decoded_box_xmin, inputWidth - 1.0), 0.0); decoded_box_ymax = dMAX(dMIN(decoded_box_ymax, inputHeight - 1.0), 0.0); decoded_box_xmax = dMAX(dMIN(decoded_box_xmax, inputWidth - 1.0), 0.0); bbox_out[blockOffset + cur_id].y1 = decoded_box_ymin; bbox_out[blockOffset + cur_id].x1 = decoded_box_xmin; bbox_out[blockOffset + cur_id].y2 = decoded_box_ymax; bbox_out[blockOffset + cur_id].x2 = decoded_box_xmax; } } } cudaError_t DecodeBBoxes(cudaStream_t stream, int N, int samples, // number of anchors per image const float* regWeight, const float inputHeight, const float inputWidth, const void* anchors, // [N, anchors, (y1, x1, y2, x2)] const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)]) 
void* outputBbox //[N, anchors, (y1, x1, y2, x2)] ) { int blocks = N; int threads = dMIN(samples, 1024); // delta multiply bbox_std // apply delta steps: // cy = anchor_cy + dy*height // cx = anchor_cx + dx*weight // h = exp(dh)*anchor_h // w = exp(dw)*anchor_w // clip the bbox in absolute coordinates float bboxClipThresh = log(1000.0f/16.0f); decode_bboxes_kernel<<<blocks, threads, 0, stream>>>(samples, anchors, delta, regWeight, inputHeight, inputWidth, outputBbox, bboxClipThresh); return cudaGetLastError(); } __global__ void apply_delta_kernel(int samples, const void* anchors, const void* delta, void* outputBbox) { const BBOX* anchors_in = static_cast<const BBOX*>(anchors); const DELTA* delta_in = static_cast<const DELTA*>(delta); BBOX* bbox_out = static_cast<BBOX*>(outputBbox); int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; BBOX cur_anchor_yxyx = anchors_in[blockOffset + cur_id]; // convert yxyx -> cyxhw // cy, cx, h, w BBOX cur_anchor_cyxhw; cur_anchor_cyxhw.y1 = (cur_anchor_yxyx.y1 + cur_anchor_yxyx.y2) / 2; cur_anchor_cyxhw.x1 = (cur_anchor_yxyx.x1 + cur_anchor_yxyx.x2) / 2; cur_anchor_cyxhw.y2 = (cur_anchor_yxyx.y2 - cur_anchor_yxyx.y1); cur_anchor_cyxhw.x2 = (cur_anchor_yxyx.x2 - cur_anchor_yxyx.x1); DELTA cur_delta = delta_in[blockOffset + cur_id]; // multiply std_dev cur_delta.dy *= 0.1; cur_delta.dx *= 0.1; cur_delta.logdh *= 0.2; cur_delta.logdw *= 0.2; // apply delta cur_anchor_cyxhw.y1 += cur_delta.dy * cur_anchor_cyxhw.y2; cur_anchor_cyxhw.x1 += cur_delta.dx * cur_anchor_cyxhw.x2; cur_anchor_cyxhw.y2 *= expf(cur_delta.logdh); cur_anchor_cyxhw.x2 *= expf(cur_delta.logdw); cur_anchor_yxyx.y1 = cur_anchor_cyxhw.y1 - 0.5 * cur_anchor_cyxhw.y2; cur_anchor_yxyx.x1 = cur_anchor_cyxhw.x1 - 0.5 * cur_anchor_cyxhw.x2; cur_anchor_yxyx.y2 = cur_anchor_yxyx.y1 + cur_anchor_cyxhw.y2; cur_anchor_yxyx.x2 = cur_anchor_yxyx.x1 + cur_anchor_cyxhw.x2; // clip bbox: a more precision clip method based on real window could be implemented cur_anchor_yxyx.y1 = dMAX(dMIN(cur_anchor_yxyx.y1, 1.0), 0.0); cur_anchor_yxyx.x1 = dMAX(dMIN(cur_anchor_yxyx.x1, 1.0), 0.0); cur_anchor_yxyx.y2 = dMAX(dMIN(cur_anchor_yxyx.y2, 1.0), 0.0); cur_anchor_yxyx.x2 = dMAX(dMIN(cur_anchor_yxyx.x2, 1.0), 0.0); bbox_out[blockOffset + cur_id].y1 = cur_anchor_yxyx.y1; bbox_out[blockOffset + cur_id].x1 = cur_anchor_yxyx.x1; bbox_out[blockOffset + cur_id].y2 = cur_anchor_yxyx.y2; bbox_out[blockOffset + cur_id].x2 = cur_anchor_yxyx.x2; } } cudaError_t ApplyDelta2Bboxes(cudaStream_t stream, int N, int samples, // number of anchors per image const void* anchors, // [N, anchors, (y1, x1, y2, x2)] const void* delta, //[N, anchors, (dy, dx, log(dh), log(dw)]) void* outputBbox //[N, anchors, (y1, x1, y2, x2)] ) { int blocks = N; int threads = dMIN(samples, 1024); // delta multiply bbox_std // apply delta steps: // cy = anchor_cy + dy*height // cx = anchor_cx + dx*weight // h = exp(dh)*anchor_h // w = exp(dw)*anchor_w // clip the bbox apply_delta_kernel<<<blocks, threads, 0, stream>>>(samples, anchors, delta, outputBbox); return cudaGetLastError(); } template <typename Tfeat> __device__ inline Tfeat interpolateBilinear(const Tfeat* src, xy_t srcDims, float y, float x) { const int y0 = static_cast<int>(y); const float yAlpha = y - static_cast<float>(y0); const int x0 = static_cast<int>(x); const float xAlpha = x - static_cast<float>(x0); assert(y0 < srcDims.y); assert(x0 < srcDims.x); const 
int y1 = (yAlpha == 0) ? y0 : y0 + 1; // ceil const int x1 = (xAlpha == 0) ? x0 : x0 + 1; // ceil assert(y1 < srcDims.y); assert(x1 < srcDims.x); const float src00 = src[(y0) *srcDims.x + (x0)]; const float src01 = src[(y0) *srcDims.x + (x1)]; const float src10 = src[(y1) *srcDims.x + (x0)]; const float src11 = src[(y1) *srcDims.x + (x1)]; const float src0 = src00 * (1 - xAlpha) + src01 * xAlpha; const float src1 = src10 * (1 - xAlpha) + src11 * xAlpha; return src0 * (1 - yAlpha) + src1 * yAlpha; } template <typename Trois, typename Tfeat> __global__ void roiAlign_kernel(int featureCount, int roiCount, float threshold, const Trois* rois, const Tfeat* P2, const xy_t P2dims, const Tfeat* P3, const xy_t P3dims, const Tfeat* P4, const xy_t P4dims, const Tfeat* P5, const xy_t P5dims, Tfeat* pooled, const xy_t poolDims) { const int batch = blockIdx.x; const int feature = blockIdx.y; for (int roiIdx = threadIdx.x; roiIdx < roiCount; roiIdx += blockDim.x) { const Trois* roi = rois + 4 * (batch * roiCount + roiIdx); const float y1 = roi[0]; const float x1 = roi[1]; const float y2 = roi[2]; const float x2 = roi[3]; if (!(0 <= y1 && y1 <= 1 && 0 <= x1 && x1 <= 1 && 0 <= y2 && y2 <= 1 && 0 <= x2 && x2 <= 1 && y1 < y2 && x1 < x2)) { continue; } else { } const float hw = (y2 - y1) * (x2 - x1); const Tfeat* src = P2; xy_t srcDims = P2dims; int iP = 2; if (hw > threshold) { src = P3; srcDims = P3dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P4; srcDims = P4dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P5; srcDims = P5dims; ++iP; } src += srcDims.x * srcDims.y * (batch * featureCount + feature); Tfeat* dst = pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature); const float yStart = y1 * (srcDims.y - 1); const float xStart = x1 * (srcDims.x - 1); const float yEnd = y2 * (srcDims.y - 1); const float xEnd = x2 * (srcDims.x - 1); const float yDelta = (yEnd - yStart) / (poolDims.y - 1); const float xDelta = (xEnd - xStart) / (poolDims.x - 1); for (int yy = 0; yy < poolDims.y; ++yy) { const float ySample = min(yStart + yDelta * yy, yEnd); for (int xx = 0; xx < poolDims.x; ++xx) { const float xSample = min(xStart + xDelta * xx, xEnd); float result = interpolateBilinear(src, srcDims, ySample, xSample); *dst = result; dst++; } } } } cudaError_t roiAlign(cudaStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold, const void* rois, const void* const layers[], const xy_t* layerDims, void* pooled, const xy_t poolDims) { const dim3 blocks(batchSize, featureCount); const int threads(256); roiAlign_kernel<<<blocks, threads, 0, stream>>>(featureCount, roiCount, firstThreshold, static_cast<const float*>(rois), static_cast<const float*>(layers[0]), layerDims[0], static_cast<const float*>(layers[1]), layerDims[1], static_cast<const float*>(layers[2]), layerDims[2], static_cast<const float*>(layers[3]), layerDims[3], static_cast<float*>(pooled), poolDims); return cudaGetLastError(); } template <typename Trois, typename Tfeat> __global__ void roiAlignHalfCenter_kernel(int featureCount, int roiCount, float threshold, int inputHeight, int inputWidth, const Trois* rois, const Tfeat* P2, const xy_t P2dims, const Tfeat* P3, const xy_t P3dims, const Tfeat* P4, const xy_t P4dims, const Tfeat* P5, const xy_t P5dims, const Tfeat* P6, const xy_t P6dims, Tfeat* pooled, const xy_t poolDims) { const int batch = blockIdx.x; const int feature = blockIdx.y; for (int roiIdx = threadIdx.x; roiIdx < roiCount; roiIdx += blockDim.x) { const 
Trois* roi = rois + 4 * (batch * roiCount + roiIdx); const float y1 = roi[0]; const float x1 = roi[1]; const float y2 = roi[2]; const float x2 = roi[3]; if (!(0 <= y1 && y1 <= inputHeight && 0 <= x1 && x1 <= inputWidth && 0 <= y2 && y2 <= inputHeight && 0 <= x2 && x2 <= inputWidth && y1 < y2 && x1 < x2)) { continue; } else { } const float hw = (y2 - y1) * (x2 - x1); const Tfeat* src = P2; xy_t srcDims = P2dims; int iP = 2; if (hw > threshold) { src = P3; srcDims = P3dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P4; srcDims = P4dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P5; srcDims = P5dims; ++iP; } threshold *= 4; if (hw > threshold) { src = P6; srcDims = P6dims; ++iP; } src += srcDims.x * srcDims.y * (batch * featureCount + feature); Tfeat* dst = pooled + poolDims.x * poolDims.y * (batch * roiCount * featureCount + roiIdx * featureCount + feature); float scale_to_level = 1.0f; for(int i = 0; i < iP; i++) { scale_to_level *= 2.0f; } const float yStart = y1 / scale_to_level; const float xStart = x1 / scale_to_level; const float yEnd = y2 / scale_to_level; const float xEnd = x2 / scale_to_level; const float yDelta = (yEnd - yStart) / (poolDims.y); const float xDelta = (xEnd - xStart) / (poolDims.x); for (int yy = 0; yy < poolDims.y; ++yy) { const float ySample = dMIN(dMAX(yStart + yDelta * (yy + 0.5), 0.0f), srcDims.y - 1.0f); for (int xx = 0; xx < poolDims.x; ++xx) { const float xSample = dMIN(dMAX(xStart + xDelta * (xx + 0.5), 0.0f), srcDims.x - 1.0f); float result = interpolateBilinear(src, srcDims, ySample, xSample); *dst = result; dst++; } } } } cudaError_t roiAlignHalfCenter(cudaStream_t stream, int batchSize, int featureCount, int roiCount, float firstThreshold, int inputHeight, int inputWidth, const void* rois, const void* const layers[], const xy_t* layerDims, void* pooled, const xy_t poolDims) { const dim3 blocks(batchSize, featureCount); const int threads(256); roiAlignHalfCenter_kernel<<<blocks, threads, 0, stream>>>(featureCount, roiCount, firstThreshold, inputHeight, inputWidth, static_cast<const float*>(rois), static_cast<const float*>(layers[0]), layerDims[0], static_cast<const float*>(layers[1]), layerDims[1], static_cast<const float*>(layers[2]), layerDims[2], static_cast<const float*>(layers[3]), layerDims[3], static_cast<const float*>(layers[4]), layerDims[4], static_cast<float*>(pooled), poolDims); return cudaGetLastError(); } __global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride, int ibatchstride, float* odata, int ostride, int obatchstride) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = blockIdx.z; for (int batch = z0; batch < nbatch; batch += gridDim.z) { for (int oy = y0; oy < osize.y; oy += blockDim.y * gridDim.y) { for (int ox = x0; ox < osize.x; ox += blockDim.x * gridDim.x) { int ix = int(ox / scale); int iy = int(oy / scale); odata[batch * obatchstride + oy * ostride + ox] = idata[batch * ibatchstride + iy * istride + ix]; } } } } void resizeNearest(dim3 grid, dim3 block, cudaStream_t stream, int nbatch, float scale, int2 osize, float const* idata, int istride, int ibatchstride, float* odata, int ostride, int obatchstride) { resize_nearest_kernel_2d<<<grid, block, 0, stream>>>( nbatch, scale, osize, idata, istride, ibatchstride, odata, ostride, obatchstride); } struct BOX { float y1, x1, y2, x2; }; struct DETECTION { float y1, x1, y2, x2, class_id, score; }; __global__ void specialslice_kernel(int samples, const void* 
idata, void* odata) { int N = blockIdx.x; int blockOffset = N * samples; int totalItems = (samples + (blockDim.x - 1)) / blockDim.x; const DETECTION* in_detections = static_cast<const DETECTION*>(idata); BOX* out_bboxes = static_cast<BOX*>(odata); for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; out_bboxes[blockOffset + cur_id].y1 = in_detections[blockOffset + cur_id].y1; out_bboxes[blockOffset + cur_id].x1 = in_detections[blockOffset + cur_id].x1; out_bboxes[blockOffset + cur_id].y2 = in_detections[blockOffset + cur_id].y2; out_bboxes[blockOffset + cur_id].x2 = in_detections[blockOffset + cur_id].x2; } } void specialSlice(cudaStream_t stream, int batch_size, int boxes_cnt, const void* idata, void* odata) { int blocks = batch_size; int threads = dMIN(boxes_cnt, 2048); specialslice_kernel<<<blocks, threads, 0, stream>>>(boxes_cnt, idata, odata); } template <typename Dtype> __global__ void concatenate(int featureCnt, int sampleCnt, const void* const* inScores, const void* const* inBBox, void* outScore, void* outBBox) { int N = blockIdx.x; int outBlockOffset = N * sampleCnt * featureCnt; int inBlockOffset = N * sampleCnt; int itemsPerThread = (sampleCnt + blockDim.x - 1) / blockDim.x; Dtype* outScorePtr = static_cast<Dtype*>(outScore); BOX* outBBoxPtr = static_cast<BOX*>(outBBox); for(int fId = 0; fId < featureCnt; fId++) { const Dtype* fInScorePtr = static_cast<const Dtype*>(inScores[fId]); const BOX* fInBBoxPtr = static_cast<const BOX*>(inBBox[fId]); int featureOffset = fId * sampleCnt; for(int i = 0; i < itemsPerThread; i++) { int curId = i * blockDim.x + threadIdx.x; if (curId < sampleCnt) { outScorePtr[outBlockOffset + featureOffset + curId] = fInScorePtr[inBlockOffset + curId]; outBBoxPtr[outBlockOffset + featureOffset + curId] = fInBBoxPtr[inBlockOffset + curId]; } } } } __global__ void resampleBBox_kernel(int orig_size, int sample_size, const void* orig_bbox_ptr, void* sampled_bbox_ptr) { const BBoxT<float>* in_bbox = static_cast<const BBoxT<float>*>(orig_bbox_ptr); BBoxT<float>* out_bbox = static_cast<BBoxT<float>*>(sampled_bbox_ptr); int N = blockIdx.x; int blockOffset_in = N * orig_size; int blockOffset_out = N * sample_size; int totalItems = (sample_size + (blockDim.x - 1)) / blockDim.x; for (int i = 0; i < totalItems; i++) { int cur_id = i * blockDim.x + threadIdx.x; if (cur_id < sample_size) { out_bbox[blockOffset_out + cur_id] = in_bbox[blockOffset_in + cur_id]; } } } cudaError_t ConcatTopK(cudaStream_t stream, int N, int featureCnt, int topK, nvinfer1::DataType dtype, void* workspace, const ConcatTopKWorkSpace& spaceOffset, void** inScores, void** inBBox, void* outProposals) { //Prepare Offset int8_t* wsPtr = static_cast<int8_t*>(workspace); void* tempStoragePtr = wsPtr + spaceOffset.tempStorageOffset; void* concatedScorePtr = wsPtr + spaceOffset.concatedScoreOffset; void* concatedBBoxPtr = wsPtr + spaceOffset.concatedBBoxOffset; void* sortedScorePtr = wsPtr + spaceOffset.sortedScoreOffset; void* sortedBBoxPtr = wsPtr + spaceOffset.sortedBBoxOffset; int blocks = N; //batch_size int threads = dMIN(topK, 2048); //Concat Scores and inBBox switch (dtype) { case nvinfer1::DataType::kFLOAT: concatenate<float><<<blocks, threads, 0, stream>>>(featureCnt, topK, inScores, inBBox, concatedScorePtr, concatedBBoxPtr); CUASSERT(cudaGetLastError()); break; case nvinfer1::DataType::kHALF: assert(false); default: assert(false); } //Sort and sample topK int itemCnt = topK * featureCnt; int* offsets = static_cast<int*>(tempStoragePtr); set_offset_kernel<<<1, 
1024, 0, stream>>>(itemCnt, N + 1, offsets); assert(cudaGetLastError() == cudaSuccess); tempStoragePtr = static_cast<void*>(static_cast<int*>(tempStoragePtr) + (N + 1)); //Sort size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, temp_storage_bytes, (float*) concatedScorePtr, (float*) sortedScorePtr, (BBoxT<float>*) concatedBBoxPtr, (BBoxT<float>*) sortedBBoxPtr, N * itemCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); assert((1 << 23) * N > temp_storage_bytes); cub::DeviceSegmentedRadixSort::SortPairsDescending(tempStoragePtr, temp_storage_bytes, (float*) concatedScorePtr, (float*) sortedScorePtr, (BBoxT<float>*) concatedBBoxPtr, (BBoxT<float>*) sortedBBoxPtr, N * itemCnt, N, offsets, offsets + 1, 0, 8 * sizeof(float), stream); assert(cudaGetLastError() == cudaSuccess); //Sample resampleBBox_kernel<<<N, dMIN(topK, 1024), 0, stream>>>(itemCnt, topK, sortedBBoxPtr, outProposals); assert(cudaGetLastError() == cudaSuccess); return cudaGetLastError(); }
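// -----------------------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original plugin): the proposal,
// refine and top-K paths above all reuse one pattern - fill an exclusive offsets array with
// set_offset_kernel, query CUB for its scratch size by passing a NULL temp-storage pointer,
// then run cub::DeviceSegmentedRadixSort::SortPairsDescending over the N per-image
// segments. The helper below isolates that pattern; segmentedSortScoresDescending and its
// parameter names are hypothetical and it relies on the declarations already in this file.
// -----------------------------------------------------------------------------------------
inline cudaError_t segmentedSortScoresDescending(cudaStream_t stream, int numSegments,
    int itemsPerSegment, const float* keysIn, float* keysOut, const BBoxT<float>* valuesIn,
    BBoxT<float>* valuesOut, int* offsets /* numSegments + 1 ints */, void* scratch,
    size_t scratchBytes)
{
    // offsets[i] = i * itemsPerSegment; offsets[numSegments] equals the total item count.
    set_offset_kernel<<<1, 1024, 0, stream>>>(itemsPerSegment, numSegments + 1, offsets);

    // First call with a NULL temp-storage pointer only reports the scratch size required.
    size_t required = 0;
    cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL, required, keysIn, keysOut,
        valuesIn, valuesOut, numSegments * itemsPerSegment, numSegments, offsets, offsets + 1,
        0, 8 * sizeof(float), stream);
    if (required > scratchBytes)
    {
        return cudaErrorMemoryAllocation;
    }

    // Second call performs the per-segment descending sort of (score, bbox) pairs.
    return cub::DeviceSegmentedRadixSort::SortPairsDescending(scratch, required, keysIn,
        keysOut, valuesIn, valuesOut, numSegments * itemsPerSegment, numSegments, offsets,
        offsets + 1, 0, 8 * sizeof(float), stream);
}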
ddd64d009cd19ed82d19344bcb383bcd3686ffcf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <hip/hip_runtime.h> #include <optix_device.h> //#include <optixu/optixpp_namespace.h> //#include <optixu/optixu_math_stream_namespace.h> #include <optix.h> #include <optixu/optixu_math_namespace.h> #include <optixu/optixu_aabb_namespace.h> #include "Common.h" rtBuffer<float3> vertex_buffer; rtBuffer<int3> index_buffer; rtBuffer<float2> texcoord_buffer; // per vertex, indexed with index_buffer rtDeclareVariable(Hit, hit_attr, attribute hit_attr, ); rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); RT_PROGRAM void intersect(int primIdx) { const int3 v_idx = index_buffer[primIdx]; const float3 p0 = vertex_buffer[v_idx.x]; const float3 p1 = vertex_buffer[v_idx.y]; const float3 p2 = vertex_buffer[v_idx.z]; // Intersect ray with triangle float3 normal; float t, beta, gamma; if (intersect_triangle(ray, p0, p1, p2, normal, t, beta, gamma)) { if (rtPotentialIntersection(t)) { Hit h; h.t = t; h.triId = primIdx; h.u = beta; h.v = gamma; h.geom_normal = optix::normalize(normal); if (texcoord_buffer.size() == 0) { h.texcoord = optix::make_float2(0.0f, 0.0f); } else { const float2 t0 = texcoord_buffer[v_idx.x]; const float2 t1 = texcoord_buffer[v_idx.y]; const float2 t2 = texcoord_buffer[v_idx.z]; h.texcoord = t1 * beta + t2 * gamma + t0 * (1.0f - beta - gamma); } hit_attr = h; rtReportIntersection( /*material index*/ 0); } } } //------------------------------------------------------------------------------ // // Bounds program // //------------------------------------------------------------------------------ RT_PROGRAM void bounds(int primIdx, float result[6]) { const int3 v_idx = index_buffer[primIdx]; const float3 p0 = vertex_buffer[v_idx.x]; const float3 p1 = vertex_buffer[v_idx.y]; const float3 p2 = vertex_buffer[v_idx.z]; optix::Aabb* aabb = (optix::Aabb*)result; aabb->m_min = fminf(fminf(p0, p1), p2); aabb->m_max = fmaxf(fmaxf(p0, p1), p2); } //------------------------------------------------------------------------------ // // Hit 
program copies hit attribute into hit PRD // //------------------------------------------------------------------------------ rtDeclareVariable(Hit, hit_prd, rtPayload, ); RT_PROGRAM void closest_hit() { hit_prd = hit_attr; } //------------------------------------------------------------------------------ // // Any-hit program masks geometry with a texture // //------------------------------------------------------------------------------ rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> mask_sampler; RT_PROGRAM void any_hit() { float4 mask = tex2D(mask_sampler, hit_attr.texcoord.x, hit_attr.texcoord.y); if (mask.x < 0.5f) { rtIgnoreIntersection(); // make surface transparent } } //------------------------------------------------------------------------------ // // Ray generation // //------------------------------------------------------------------------------ rtDeclareVariable(unsigned int, launch_index, rtLaunchIndex, ); rtDeclareVariable(rtObject, top_object, , ); rtBuffer<Hit, 1> hits; rtBuffer<Ray, 1> rays; RT_PROGRAM void ray_gen() { Hit hit_prd; hit_prd.t = -1.0f; hit_prd.triId = -1; hit_prd.u = 0.0f; hit_prd.v = 0.0f; hit_prd.geom_normal = optix::make_float3(1, 0, 0); Ray ray = rays[launch_index]; rtTrace(top_object, optix::make_Ray(ray.origin, ray.dir, 0, ray.tmin, ray.tmax), hit_prd); hits[launch_index] = hit_prd; } //------------------------------------------------------------------------------ // // Exception program for debugging only // //------------------------------------------------------------------------------ RT_PROGRAM void exception() { const unsigned int code = rtGetExceptionCode(); rtPrintf("Caught exception 0x%X at launch index (%d)\n", code, launch_index); Hit hit_prd; hit_prd.t = -1.0f; hit_prd.triId = -1; hit_prd.u = 0.0f; hit_prd.v = 0.0f; hit_prd.geom_normal = optix::make_float3(1, 0, 0); hits[launch_index] = hit_prd; }
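// -----------------------------------------------------------------------------------------
// Editor's note (illustrative sketch): intersect_triangle() reports the hit as barycentric
// weights (beta, gamma) of p1 and p2, with p0 taking the remaining 1 - beta - gamma, which
// is exactly how the texcoords are blended in intersect() above. Any per-vertex float2
// attribute can be interpolated the same way; this helper is hypothetical and unused here.
// -----------------------------------------------------------------------------------------
static __device__ __inline__ float2 interpolateBarycentric(
    const float2& a0, const float2& a1, const float2& a2, float beta, float gamma)
{
    // p0's weight is whatever remains after beta and gamma.
    return a1 * beta + a2 * gamma + a0 * (1.0f - beta - gamma);
}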
ddd64d009cd19ed82d19344bcb383bcd3686ffcf.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cuda_runtime.h> #include <optix_device.h> //#include <optixu/optixpp_namespace.h> //#include <optixu/optixu_math_stream_namespace.h> #include <optix.h> #include <optixu/optixu_math_namespace.h> #include <optixu/optixu_aabb_namespace.h> #include "Common.h" rtBuffer<float3> vertex_buffer; rtBuffer<int3> index_buffer; rtBuffer<float2> texcoord_buffer; // per vertex, indexed with index_buffer rtDeclareVariable(Hit, hit_attr, attribute hit_attr, ); rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); RT_PROGRAM void intersect(int primIdx) { const int3 v_idx = index_buffer[primIdx]; const float3 p0 = vertex_buffer[v_idx.x]; const float3 p1 = vertex_buffer[v_idx.y]; const float3 p2 = vertex_buffer[v_idx.z]; // Intersect ray with triangle float3 normal; float t, beta, gamma; if (intersect_triangle(ray, p0, p1, p2, normal, t, beta, gamma)) { if (rtPotentialIntersection(t)) { Hit h; h.t = t; h.triId = primIdx; h.u = beta; h.v = gamma; h.geom_normal = optix::normalize(normal); if (texcoord_buffer.size() == 0) { h.texcoord = optix::make_float2(0.0f, 0.0f); } else { const float2 t0 = texcoord_buffer[v_idx.x]; const float2 t1 = texcoord_buffer[v_idx.y]; const float2 t2 = texcoord_buffer[v_idx.z]; h.texcoord = t1 * beta + t2 * gamma + t0 * (1.0f - beta - gamma); } hit_attr = h; rtReportIntersection( /*material index*/ 0); } } } //------------------------------------------------------------------------------ // // Bounds program // //------------------------------------------------------------------------------ RT_PROGRAM void bounds(int primIdx, float result[6]) { const int3 v_idx = index_buffer[primIdx]; const float3 p0 = vertex_buffer[v_idx.x]; const float3 p1 = vertex_buffer[v_idx.y]; const float3 p2 = vertex_buffer[v_idx.z]; optix::Aabb* aabb = (optix::Aabb*)result; aabb->m_min = fminf(fminf(p0, p1), p2); aabb->m_max = fmaxf(fmaxf(p0, p1), p2); } //------------------------------------------------------------------------------ // // Hit program copies hit attribute into hit PRD // 
//------------------------------------------------------------------------------ rtDeclareVariable(Hit, hit_prd, rtPayload, ); RT_PROGRAM void closest_hit() { hit_prd = hit_attr; } //------------------------------------------------------------------------------ // // Any-hit program masks geometry with a texture // //------------------------------------------------------------------------------ rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> mask_sampler; RT_PROGRAM void any_hit() { float4 mask = tex2D(mask_sampler, hit_attr.texcoord.x, hit_attr.texcoord.y); if (mask.x < 0.5f) { rtIgnoreIntersection(); // make surface transparent } } //------------------------------------------------------------------------------ // // Ray generation // //------------------------------------------------------------------------------ rtDeclareVariable(unsigned int, launch_index, rtLaunchIndex, ); rtDeclareVariable(rtObject, top_object, , ); rtBuffer<Hit, 1> hits; rtBuffer<Ray, 1> rays; RT_PROGRAM void ray_gen() { Hit hit_prd; hit_prd.t = -1.0f; hit_prd.triId = -1; hit_prd.u = 0.0f; hit_prd.v = 0.0f; hit_prd.geom_normal = optix::make_float3(1, 0, 0); Ray ray = rays[launch_index]; rtTrace(top_object, optix::make_Ray(ray.origin, ray.dir, 0, ray.tmin, ray.tmax), hit_prd); hits[launch_index] = hit_prd; } //------------------------------------------------------------------------------ // // Exception program for debugging only // //------------------------------------------------------------------------------ RT_PROGRAM void exception() { const unsigned int code = rtGetExceptionCode(); rtPrintf("Caught exception 0x%X at launch index (%d)\n", code, launch_index); Hit hit_prd; hit_prd.t = -1.0f; hit_prd.triId = -1; hit_prd.u = 0.0f; hit_prd.v = 0.0f; hit_prd.geom_normal = optix::make_float3(1, 0, 0); hits[launch_index] = hit_prd; }
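// -----------------------------------------------------------------------------------------
// Editor's note (illustrative sketch): Common.h is not part of this listing, but the
// programs above only touch the fields accessed here. A compatible layout could look like
// the commented structs below; the real declarations in Common.h may differ, so treat this
// purely as an assumption recorded for readability.
// -----------------------------------------------------------------------------------------
// struct Ray
// {
//     float3 origin; // world-space ray origin, filled in by the host per launch index
//     float3 dir;    // ray direction
//     float tmin;    // minimum accepted hit distance for rtTrace
//     float tmax;    // maximum accepted hit distance for rtTrace
// };
//
// struct Hit
// {
//     float t;            // hit distance, -1.0f when the ray missed
//     int triId;          // primitive index of the hit triangle, -1 on miss
//     float u, v;         // barycentric coordinates (beta, gamma) of the hit point
//     float3 geom_normal; // normalized geometric normal of the hit triangle
//     float2 texcoord;    // interpolated texture coordinate, (0, 0) if untextured
// };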
a137e7093fc4183d4cdc8f32d86ad4dce8fcd7a7.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at { namespace native { const char exp2_name[] = "exp2_kernel"; void exp2_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "exp2_cuda", [&]() { jitted_gpu_kernel</*name=*/exp2_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, exp2_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "exp2_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp2(a); }); }); #endif } const char i0_name[] = "i0"; void i0_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() { jitted_gpu_kernel</*name=*/i0_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i0_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; // implicit conversion of a to opmath_t will happen here, // but as far as TI is concerned, it's still a no-dynamic-cast kernel because lambda input is scalar_t return calc_i0<opmath_t>(a); }); }); #endif } // See note [Jiterator] const char i0e_name[] = "calc_i0e"; void i0e_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() { jitted_gpu_kernel</*name=*/i0e_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i0e_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return calc_i0e<opmath_t>(a); }); }); #endif } // See note [Jiterator] const char i1_name[] = "i1"; void i1_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() { jitted_gpu_kernel</*name=*/i1_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i1_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_i1(a); }); }); #endif // AT_USE_JITERATOR() } const char i1e_name[] = "i1e"; void i1e_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() { jitted_gpu_kernel</*name=*/i1e_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i1e_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return 
calc_i1e(a); }); }); #endif } const char sigmoid_name[] = "sigmoid"; void sigmoid_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { // only jiterate for complex-dtype #if AT_USE_JITERATOR() static const auto sigmoid_string = jiterator_stringify( template <typename T> T sigmoid(T x) { return T{1} / (T{1} + ::exp(-x)); } ); // sigmoid_string AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() { jitted_gpu_kernel< /*name=*/sigmoid_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, sigmoid_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; const auto one = opmath_t{1}; return static_cast<scalar_t>(one / (one + ::exp(-opmath_t{a}))); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, common_dtype, "sigmoid_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; const auto one = opmath_t{1}; return static_cast<scalar_t>(one/(one + ::exp(-opmath_t{a}))); }); }); } } const char sinc_name[] = "sinc"; void sinc_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sinc_cuda", [&]() { jitted_gpu_kernel</*name=*/sinc_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, sinc_string); }); #else AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sinc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { if (a == scalar_t(0)) { return scalar_t(1); } else { // NVCC says constexpr var is not accessible from device using opmath_t = at::opmath_type<scalar_t>; opmath_t product = c10::detail::pi<opmath_t>() * opmath_t{a}; return static_cast<scalar_t>(std::sin(product) / product); } }); }); #endif } void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "logit_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC eps = eps_scalar.to<T_ACC>(); if (eps < T_ACC(0)) { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); return c10::hip::compat::log(x_acc / (T_ACC(1) - x_acc)); }); } else { const T_ACC lo = eps; const T_ACC hi = T_ACC(1) - eps; gpu_kernel( iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); T_ACC z = x_acc < lo ? lo : (x_acc > hi ? 
hi : x_acc); return c10::hip::compat::log(z / (T_ACC(1) - z)); }); } }); } const char ndtri_name[] = "ndtri"; void ndtri_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() { jitted_gpu_kernel</*name=*/ndtri_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, ndtri_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_ndtri(a); }); }); #endif } const char log_ndtr_name[] = "log_ndtr"; void log_ndtr_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() { jitted_gpu_kernel</*name=*/log_ndtr_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, log_ndtr_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_log_ndtr(a); }); }); #endif } void erf_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erf(a); }); }); } const char erfc_name[] = "erfc_kernel"; void erfc_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() { jitted_gpu_kernel</*name=*/erfc_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, erfc_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erfc(a); }); }); #endif } const char erfinv_name[] = "erfinv_kernel"; void erfinv_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() { jitted_gpu_kernel</*name=*/erfinv_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, erfinv_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return ::erfinv(a); }); }); #endif } const char erfcx_name[] = "erfcx"; void erfcx_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() { jitted_gpu_kernel</*name=*/erfcx_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, erfcx_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() { gpu_kernel( iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_erfcx(a); }); }); #endif } const char kaiser_window_name[] = "kaiser_window"; void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){ #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){ using opmath_t = at::opmath_type<scalar_t>; const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1)); const opmath_t beta = static_cast<opmath_t>(beta_); const opmath_t inv_i0_beta = 1.0 / calc_i0(beta); jitted_gpu_kernel< /*name=*/kaiser_window_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>( iter, kaiser_window_string, 
/*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar, /*scalar_val=*/0, /*extra_args=*/std::make_tuple(inv_alpha, beta, inv_i0_beta)); }); #else AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){ using opmath_t = at::opmath_type<scalar_t>; const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1)); const opmath_t beta = static_cast<opmath_t>(beta_); const opmath_t inv_i0_beta = 1.0 / calc_i0(beta); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t { opmath_t x = static_cast<opmath_t>(a) * inv_alpha - 1; opmath_t y = std::max<opmath_t>(0, 1 - x * x); return calc_i0(beta * ::sqrt(y)) * inv_i0_beta; }); }); #endif } const char entr_name[] = "entr"; void entr_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() { jitted_gpu_kernel</*name=*/entr_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, entr_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() { gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t { if (at::_isnan(x)) { return x; } else if (x > 0) { return -x * ::log(x); } else if (x == 0) { return 0; } return static_cast<scalar_t>(-INFINITY); }); }); #endif } REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda); REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda); REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda); REGISTER_DISPATCH(special_i1_stub, &i1_kernel_cuda); REGISTER_DISPATCH(special_i1e_stub, &i1e_kernel_cuda); REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda); REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda); REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda); REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda); REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda); REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda); REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda); REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda); REGISTER_DISPATCH(special_ndtri_stub, &ndtri_kernel_cuda); REGISTER_DISPATCH(special_log_ndtr_stub, &log_ndtr_kernel_cuda); REGISTER_DISPATCH(special_erfcx_stub, &erfcx_kernel_cuda); } // namespace native } // namespace at
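// -----------------------------------------------------------------------------------------
// Editor's note (illustrative sketch): every kernel in this file follows the same shape - a
// jiterator string compiled through jitted_gpu_kernel when AT_USE_JITERATOR() is enabled,
// with a gpu_kernel lambda as the fallback. The hypothetical "rsqrt_alt" op below only
// restates that pattern; it is not part of the original file and registers no dispatcher.
// -----------------------------------------------------------------------------------------
namespace at { namespace native {

const char rsqrt_alt_name[] = "rsqrt_alt";
void rsqrt_alt_kernel_sketch(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  static const auto rsqrt_alt_string = jiterator_stringify(
      template <typename T> T rsqrt_alt(T x) { return T{1} / ::sqrt(x); });
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "rsqrt_alt_cuda", [&]() {
    jitted_gpu_kernel</*name=*/rsqrt_alt_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, rsqrt_alt_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "rsqrt_alt_cuda", [&]() {
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
      return scalar_t(1) / ::sqrt(a);
    });
  });
#endif
}

}} // namespace at::native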
a137e7093fc4183d4cdc8f32d86ad4dce8fcd7a7.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at { namespace native { const char exp2_name[] = "exp2_kernel"; void exp2_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "exp2_cuda", [&]() { jitted_gpu_kernel</*name=*/exp2_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, exp2_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "exp2_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::exp2(a); }); }); #endif } const char i0_name[] = "i0"; void i0_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() { jitted_gpu_kernel</*name=*/i0_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i0_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; // implicit conversion of a to opmath_t will happen here, // but as far as TI is concerned, it's still a no-dynamic-cast kernel because lambda input is scalar_t return calc_i0<opmath_t>(a); }); }); #endif } // See note [Jiterator] const char i0e_name[] = "calc_i0e"; void i0e_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() { jitted_gpu_kernel</*name=*/i0e_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i0e_string); }); #else AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return calc_i0e<opmath_t>(a); }); }); #endif } // See note [Jiterator] const char i1_name[] = "i1"; void i1_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() { jitted_gpu_kernel</*name=*/i1_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i1_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_i1(a); }); }); #endif // AT_USE_JITERATOR() } const char i1e_name[] = "i1e"; void i1e_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() { jitted_gpu_kernel</*name=*/i1e_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, i1e_string); }); #else AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_i1e(a); }); }); #endif } const char sigmoid_name[] = 
"sigmoid"; void sigmoid_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { // only jiterate for complex-dtype #if AT_USE_JITERATOR() static const auto sigmoid_string = jiterator_stringify( template <typename T> T sigmoid(T x) { return T{1} / (T{1} + std::exp(-x)); } ); // sigmoid_string AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() { jitted_gpu_kernel< /*name=*/sigmoid_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, sigmoid_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sigmoid_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; const auto one = opmath_t{1}; return static_cast<scalar_t>(one / (one + std::exp(-opmath_t{a}))); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, common_dtype, "sigmoid_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; const auto one = opmath_t{1}; return static_cast<scalar_t>(one/(one + std::exp(-opmath_t{a}))); }); }); } } const char sinc_name[] = "sinc"; void sinc_kernel_cuda(TensorIteratorBase& iter) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sinc_cuda", [&]() { jitted_gpu_kernel</*name=*/sinc_name, /*return_dtype=*/ scalar_t, /*common_dtype=*/ scalar_t, /*arity=*/ 1>(iter, sinc_string); }); #else AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sinc_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { if (a == scalar_t(0)) { return scalar_t(1); } else { // NVCC says constexpr var is not accessible from device using opmath_t = at::opmath_type<scalar_t>; opmath_t product = c10::detail::pi<opmath_t>() * opmath_t{a}; return static_cast<scalar_t>(std::sin(product) / product); } }); }); #endif } void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "logit_cuda", [&]() { using T_ACC = acc_type<scalar_t, true>; const T_ACC eps = eps_scalar.to<T_ACC>(); if (eps < T_ACC(0)) { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc)); }); } else { const T_ACC lo = eps; const T_ACC hi = T_ACC(1) - eps; gpu_kernel( iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t { const T_ACC x_acc = static_cast<T_ACC>(x); T_ACC z = x_acc < lo ? lo : (x_acc > hi ? 
hi : x_acc);
                return c10::cuda::compat::log(z / (T_ACC(1) - z));
              });
        }
      });
}

const char ndtri_name[] = "ndtri";
void ndtri_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
    jitted_gpu_kernel</*name=*/ndtri_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, ndtri_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() {
    gpu_kernel(
        iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_ndtri(a); });
  });
#endif
}

const char log_ndtr_name[] = "log_ndtr";
void log_ndtr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
    jitted_gpu_kernel</*name=*/log_ndtr_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, log_ndtr_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "log_ndtr_cuda", [&]() {
    gpu_kernel(
        iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_log_ndtr(a); });
  });
#endif
}

void erf_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::erf(a);
    });
  });
}

const char erfc_name[] = "erfc_kernel";
void erfc_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() {
    jitted_gpu_kernel</*name=*/erfc_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, erfc_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "erfc_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::erfc(a);
    });
  });
#endif
}

const char erfinv_name[] = "erfinv_kernel";
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
    jitted_gpu_kernel</*name=*/erfinv_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, erfinv_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return ::erfinv(a);
    });
  });
#endif
}

const char erfcx_name[] = "erfcx";
void erfcx_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
    jitted_gpu_kernel</*name=*/erfcx_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, erfcx_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "erfcx_cuda", [&]() {
    gpu_kernel(
        iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_erfcx(a); });
  });
#endif
}

const char kaiser_window_name[] = "kaiser_window";
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
    using opmath_t = at::opmath_type<scalar_t>;
    const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1));
    const opmath_t beta = static_cast<opmath_t>(beta_);
    const opmath_t inv_i0_beta = 1.0 / calc_i0(beta);
    jitted_gpu_kernel<
        /*name=*/kaiser_window_name,
        /*return_dtype=*/scalar_t,
        /*common_dtype=*/scalar_t,
        /*arity=*/1>(
        iter,
        kaiser_window_string,
        /*scalar_pos=*/at::cuda::jit::BinaryFuncVariant::NoScalar,
        /*scalar_val=*/0,
        /*extra_args=*/std::make_tuple(inv_alpha, beta, inv_i0_beta));
  });
#else
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
    using opmath_t = at::opmath_type<scalar_t>;
    const opmath_t inv_alpha = static_cast<opmath_t>(2.0 / (window_length - 1));
    const opmath_t beta = static_cast<opmath_t>(beta_);
    const opmath_t inv_i0_beta = 1.0 / calc_i0(beta);
    gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
      opmath_t x = static_cast<opmath_t>(a) * inv_alpha - 1;
      opmath_t y = std::max<opmath_t>(0, 1 - x * x);
      return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
    });
  });
#endif
}

const char entr_name[] = "entr";
void entr_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() {
    jitted_gpu_kernel</*name=*/entr_name,
                      /*return_dtype=*/ scalar_t,
                      /*common_dtype=*/ scalar_t,
                      /*arity=*/ 1>(iter, entr_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "entr_cuda", [&]() {
        gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
          if (at::_isnan(x)) {
            return x;
          } else if (x > 0) {
            return -x * std::log(x);
          } else if (x == 0) {
            return 0;
          }
          return static_cast<scalar_t>(-INFINITY);
        });
      });
#endif
}

REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(special_i1_stub, &i1_kernel_cuda);
REGISTER_DISPATCH(special_i1e_stub, &i1e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
REGISTER_DISPATCH(special_ndtri_stub, &ndtri_kernel_cuda);
REGISTER_DISPATCH(special_log_ndtr_stub, &log_ndtr_kernel_cuda);
REGISTER_DISPATCH(special_erfcx_stub, &erfcx_kernel_cuda);

} // namespace native
} // namespace at
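// ---------------------------------------------------------------------------
// Illustrative sketch (not part of ATen): every kernel above follows the same
// pattern -- dispatch on the iterator's dtype, then apply a scalar lambda to
// each element through gpu_kernel / jitted_gpu_kernel. The standalone CUDA
// code below shows the equivalent hand-written elementwise sigmoid over a raw
// float buffer using a grid-stride loop. The names naive_sigmoid_kernel and
// launch_naive_sigmoid are hypothetical and exist only for this example.
#include <cuda_runtime.h>

__global__ void naive_sigmoid_kernel(const float* in, float* out, int n) {
  // Grid-stride loop: any grid size covers all n elements.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = 1.0f / (1.0f + expf(-in[i]));
  }
}

// Host-side launcher; d_in and d_out must be device pointers of length n.
inline void launch_naive_sigmoid(const float* d_in, float* d_out, int n,
                                 cudaStream_t stream = 0) {
  if (n <= 0) return;
  const int block = 256;
  const int grid = (n + block - 1) / block;
  naive_sigmoid_kernel<<<grid, block, 0, stream>>>(d_in, d_out, n);
}
// ---------------------------------------------------------------------------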
5fa502e7f620e6414682235e16bbc19121d8e7f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Viola-Jones Object Detection Framework // // The algorithm and code are explained in the upcoming GPU Computing Gems // chapter in detail: // // Anton Obukhov, "Haar Classifiers for Object Detection with CUDA" // PDF URL placeholder // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov. 
// //////////////////////////////////////////////////////////////////////////////// #include <algorithm> #include <cstdio> #include "opencv2/core/cuda/warp.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_OBJDETECT # include "opencv2/objdetect.hpp" # include "opencv2/objdetect/objdetect_c.h" #endif #include "opencv2/cudalegacy/NCV.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp" #include "NCVRuntimeTemplates.hpp" #include "NCVAlg.hpp" //============================================================================== // // BlockScan file // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 __device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data) { #if __CUDA_ARCH__ >= 300 const unsigned int laneId = cv::cuda::device::Warp::laneId(); // scan on shuffl functions #pragma unroll for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2) { const Ncv32u n = cv::cuda::device::shfl_up(idata, i); if (laneId >= i) idata += n; } return idata; #else Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; #endif } __device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <Ncv32u tiNumScanThreads> __device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan Ncv32u warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements Ncv32u val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // HaarClassifierCascade file // //============================================================================== const Ncv32u MAX_GRID_DIM = 65535; const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64; #define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6 #define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2) /** \internal * Haar features solid array. */ texture<uint2, 1, hipReadModeElementType> texHaarFeatures; /** \internal * Haar classifiers flattened trees container. * Two parts: first contains root nodes, second - nodes that are referred by root nodes. 
* Drawback: breaks tree locality (might cause more cache misses * Advantage: No need to introduce additional 32-bit field to index root nodes offsets */ texture<uint4, 1, hipReadModeElementType> texHaarClassifierNodes; texture<Ncv32u, 1, hipReadModeElementType> texIImage; __device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages) { return d_Stages[iStage]; } template <NcvBool tbCacheTextureCascade> __device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes) { HaarClassifierNode128 tmpNode; if (tbCacheTextureCascade) { tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode); } else { tmpNode = d_ClassifierNodes[iNode]; } return tmpNode; } template <NcvBool tbCacheTextureCascade> __device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features, Ncv32f *weight, Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight) { HaarFeature64 feature; if (tbCacheTextureCascade) { feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature); } else { feature = d_Features[iFeature]; } feature.getRect(rectX, rectY, rectWidth, rectHeight); *weight = feature.getWeight(); } template <NcvBool tbCacheTextureIImg> __device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg) { if (tbCacheTextureIImg) { return tex1Dfetch(texIImage, x); } else { return d_IImg[x]; } } __device__ Ncv32u d_outMaskPosition; __device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 __shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2]; __shared__ Ncv32u numPassed; __shared__ Ncv32u outMaskOffset; Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem); __syncthreads(); if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1) { numPassed = incScan; outMaskOffset = atomicAdd(&d_outMaskPosition, incScan); } if (threadPassFlag) { Ncv32u excScan = incScan - threadPassFlag; shmem[excScan] = threadElem; } __syncthreads(); if (threadIdx.x < numPassed) { vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x]; } #endif } template <NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u y_offs; Ncv32u x_offs; Ncv32u maskOffset; Ncv32u outMaskVal; NcvBool bInactiveThread = false; if (tbReadPixelIndexFromVector) { maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (maskOffset >= mask1Dlen) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { outMaskVal = d_inMask[maskOffset]; y_offs = outMaskVal >> 16; x_offs = outMaskVal & 0xFFFF; } } else { y_offs = blockIdx.y; x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (x_offs >= mask2Dstride) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { maskOffset = y_offs * mask2Dstride + x_offs; if ((x_offs >= anchorsRoi.width) || (!tbInitMaskPositively && d_inMask != d_outMask && d_inMask[maskOffset] == 
OBJDET_MASK_ELEMENT_INVALID_32U)) { if (tbDoAtomicCompaction) { bInactiveThread = true; } else { d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U; return; } } outMaskVal = (y_offs << 16) | x_offs; } } NcvBool bPass = true; if (!tbDoAtomicCompaction || tbDoAtomicCompaction) { Ncv32f pixelStdDev = 0.0f; if (!bInactiveThread) pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++) { Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset(); Ncv32f stageThreshold = curStage.getStageThreshold(); while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u iNode = curRootNodeOffset; if (bPass && !bInactiveThread) { while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset++; } if (curStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; } } } __syncthreads(); if (!tbDoAtomicCompaction) { if (!tbReadPixelIndexFromVector || (tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask))) { d_outMask[maskOffset] = outMaskVal; } } else { compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread, outMaskVal, d_outMask); } } template <NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u 
mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x; if (maskOffset >= mask1Dlen) { return; } Ncv32u outMaskVal = d_inMask[maskOffset]; Ncv32u y_offs = outMaskVal >> 16; Ncv32u x_offs = outMaskVal & 0xFFFF; Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; NcvBool bPass = true; for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++) { //this variable is subject to reduction Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x; Ncv32f stageThreshold = curStage.getStageThreshold(); Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2; for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++) { NcvBool bMoreNodesToTraverse = true; if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage) { Ncv32u iNode = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; //TODO: fetch into shmem if size suffices. Shmem can be shared with reduce for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL; } Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum); if (finalStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } if (!tbDoAtomicCompaction) { if (!bPass || d_inMask != d_outMask) { if (!threadIdx.x) { d_outMask[maskOffset] = outMaskVal; } } } else { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 if 
(bPass && !threadIdx.x) { Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1); d_outMask[outMaskOffset] = outMaskVal; } #endif } } template <NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction> __global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { Ncv32u y_offs = blockIdx.y; Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs; Ncv32u y_offs_upsc = step * y_offs; Ncv32u x_offs_upsc = step * x_offs; Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc; Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U; if (x_offs_upsc < anchorsRoi.width && (!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U)) { outElem = (y_offs_upsc << 16) | x_offs_upsc; } if (!tbDoAtomicCompaction) { d_outMask[outMaskOffset] = outElem; } else { compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U, outElem, d_outMask); } } struct applyHaarClassifierAnchorParallelFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; hipLaunchKernelGGL(( applyHaarClassifierAnchorParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value, Loki::TL::TypeAt<TList, 3>::Result::value, Loki::TL::TypeAt<TList, 4>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u 
startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor> ::call( &functor, tbInitMaskPositively, tbCacheTextureIImg, tbCacheTextureCascade, tbReadPixelIndexFromVector, tbDoAtomicCompaction); } struct applyHaarClassifierClassifierParallelFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; hipLaunchKernelGGL(( applyHaarClassifierClassifierParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor> ::call( &functor, tbCacheTextureIImg, tbCacheTextureCascade, 
tbDoAtomicCompaction); } struct initializeMaskVectorFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u step; //Arguments are passed through the constructor initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _step) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), step(_step) {} template<class TList> void call(TList tl) { (void)tl; hipLaunchKernelGGL(( initializeMaskVector < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); } }; void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor> ::call( &functor, tbMaskByInmask, tbDoAtomicCompaction); } Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages) { Ncv32u i = 0; for (; i<haar.NumStages; i++) { if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N) { break; } } return i; } NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral, NCVMatrix<Ncv32f> &d_weights, NCVMatrixAlloc<Ncv32u> &d_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea, INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, hipDeviceProp_t &devProp, hipStream_t cuStream) { ncvAssertReturn(integral.memType() == d_weights.memType()&& integral.memType() == d_pixelMask.memType() && integral.memType() == gpuAllocator.memType() && (integral.memType() == NCVMemoryTypeDevice || integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() 
>= anchorsRoi.height && d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height && integral.width() >= anchorsRoi.width + haar.ClassifierSize.width && integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); #if defined _SELF_TEST_ NCVStatus ncvStat; NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch); ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch); ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length); ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length); ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN ncvStat = d_pixelMask.copySolid(h_pixelMask, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = integral.copySolid(h_integralImage, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_weights.copySolid(h_weights, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(0), NCV_CUDA_ERROR); for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++) { for (Ncv32u j=0; j<d_pixelMask.stride(); j++) { if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width)) { if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U) { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j; } } else { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U; } } } NCV_SKIP_COND_END #endif NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride()); ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE); NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length())); ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2); ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); Ncv32u *hp_zero = &hp_pool32u.ptr()[0]; Ncv32u *hp_numDet = &hp_pool32u.ptr()[1]; NCV_SKIP_COND_BEGIN *hp_zero = 0; *hp_numDet = 0; NCV_SKIP_COND_END Ncv32f scaleAreaPixels = scaleArea * 
((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); NcvBool bTexCacheCascade = devProp.major < 2; NcvBool bTexCacheIImg = true; //this works better even on Fermi so far NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3); NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask; NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp; Ncv32u szNppCompactTmpBuf; nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp); if (bDoAtomicCompaction) { szNppCompactTmpBuf = 0; } NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf); NCV_SKIP_COND_BEGIN if (bTexCacheIImg) { hipChannelFormatDesc cfdTexIImage; cfdTexIImage = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage, (anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } if (bTexCacheCascade) { hipChannelFormatDesc cfdTexHaarFeatures; hipChannelFormatDesc cfdTexHaarClassifierNodes; cfdTexHaarFeatures = hipCreateChannelDesc<uint2>(); cfdTexHaarClassifierNodes = hipCreateChannelDesc<uint4>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarFeatures, d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarClassifierNodes, d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } Ncv32u stageStartAnchorParallel = 0; Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL, haar, h_HaarStages); Ncv32u stageEndClassifierParallel = haar.NumStages; if (stageMiddleSwitch == 0) { stageMiddleSwitch = 1; } //create stages subdivision for pixel-parallel processing const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 
7 : 1; Ncv32u curStop = stageStartAnchorParallel; std::vector<Ncv32u> pixParallelStageStops; while (curStop < stageMiddleSwitch) { pixParallelStageStops.push_back(curStop); curStop += compactEveryNstage; } if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2) { pixParallelStageStops[pixParallelStageStops.size()-1] = (stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2; } pixParallelStageStops.push_back(stageMiddleSwitch); Ncv32u pixParallelStageStopsIndex = 0; if (pixelStep != 1 || bMaskElements) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), (anchorsRoi.height + pixelStep - 1) / pixelStep); dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL); if (gridInit.x == 0 || gridInit.y == 0) { numDetections = 0; return NCV_SUCCESS; } initializeMaskVectorDynTemplate(bMaskElements, bDoAtomicCompaction, gridInit, blockInit, cuStream, d_ptrNowData->ptr(), d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(), anchorsRoi, pixelStep); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); swap(d_ptrNowData, d_ptrNowTmp); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR); } numDetections = *hp_numDet; } else { // // 1. Run the first pixel-input pixel-parallel classifier for few stages // if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), anchorsRoi.height); dim3 block1(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( true, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid1, block1, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), 0, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; pixParallelStageStopsIndex++; } // // 2. Run pixel-parallel stages // for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++) { if (numDetections == 0) { break; } if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL); if (numDetections > MAX_GRID_DIM) { grid2.x = MAX_GRID_DIM; grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block2(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( false, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid2, block2, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } // // 3. 
Run all left stages in one stage-parallel kernel // if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid3(numDetections); if (numDetections > MAX_GRID_DIM) { grid3.x = MAX_GRID_DIM; grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL); applyHaarClassifierClassifierParallelDynTemplate( bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade bDoAtomicCompaction, //tbDoAtomicCompaction grid3, block3, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, stageMiddleSwitch, stageEndClassifierParallel, scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } if (d_ptrNowData != &d_vecPixelMask) { d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } #if defined _SELF_TEST_ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections); } Ncv32u fpu_oldcw, fpu_cw; _controlfp_s(&fpu_cw, 0, 0); fpu_oldcw = fpu_cw; _controlfp_s(&fpu_cw, _PC_24, _MCW_PC); Ncv32u numDetGold; ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, bMaskElements, anchorsRoi, pixelStep, scaleArea); ncvAssertReturnNcvStat(ncvStat); _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC); bool bPass = true; if (numDetGold != numDetections) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections); bPass = false; } else { for (Ncv32u i=0; i<::max(numDetGold, numDetections) && bPass; i++) { if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i]) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]); bPass = false; } } } printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED"); #endif NCV_SKIP_COND_END return NCV_SUCCESS; } //============================================================================== // // HypothesesOperations file // //============================================================================== const Ncv32u NUM_GROW_THREADS = 128; __device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, 
Ncv32u height, Ncv32f scale) { NcvRect32u res; res.x = (Ncv32u)(scale * (pixel & 0xFFFF)); res.y = (Ncv32u)(scale * (pixel >> 16)); res.width = (Ncv32u)(scale * width); res.height = (Ncv32u)(scale * height); return res; } __global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements, NcvRect32u *hypotheses, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x; if (elemAddr >= numElements) { return; } hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale); } NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale, hipStream_t cuStream) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } dim3 block(NUM_GROW_THREADS); dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( growDetectionsKernel), dim3(grid), dim3(block), 0, cuStream, pixelMask.ptr(), numDetsToCopy, hypotheses.ptr() + totalDetections, rectWidth, rectHeight, curScale); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); totalDetections += numDetsToCopy; return ncvStat; } //============================================================================== // // Pipeline file // //============================================================================== NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg, NcvSize32u srcRoi, NCVVector<NcvRect32u> &d_dstRects, Ncv32u &dstNumRects, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvSize32u minObjSize, Ncv32u minNeighbors, //default 4 Ncv32f scaleStep, //default 1.2f Ncv32u pixelStep, //default 1 Ncv32u flags, //default NCVPipeObjDet_Default INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, hipDeviceProp_t &devProp, hipStream_t cuStream) { ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() && d_srcImg.memType() == gpuAllocator.memType() && (d_srcImg.memType() == NCVMemoryTypeDevice || d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, 
NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 && d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height && srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height && d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); //TODO: set NPP active stream to cuStream NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); Ncv32u integralWidth = d_srcImg.width() + 1; Ncv32u integralHeight = d_srcImg.height() + 1; NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVStatus nppStat; Ncv32u szTmpBufIntegral, szTmpBufSqIntegral; nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, ::max(szTmpBufIntegral, szTmpBufSqIntegral)); ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), integral.ptr(), integral.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), 
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END dstNumRects = 0; Ncv32u lastCheckedScale = 0; NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0); std::vector<Ncv32u> scalesVector; NcvBool bFoundLargestFace = false; for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep) { Ncv32u scale = (Ncv32u)scaleIter; if (lastCheckedScale == scale) { continue; } lastCheckedScale = scale; if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width || haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height) { continue; } NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRo_i.width = srcRoi_.width + 1; srcIIRo_i.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRo_i.width / scale; scaledIIRoi.height = srcIIRo_i.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; if (searchRoi.width <= 0 || searchRoi.height <= 0) { break; } scalesVector.push_back(scale); if (gpuAllocator.isCounting()) { break; } } if (bReverseTraverseScale) { std::reverse(scalesVector.begin(), scalesVector.end()); } //TODO: handle _fair_scale_ flag for (Ncv32u i=0; i<scalesVector.size(); i++) { Ncv32u scale = scalesVector[i]; NcvSize32u srcRoi_, scaledIIRoi, searchRoi; NcvSize32u srcIIRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRoi.width = srcRoi_.width + 1; srcIIRoi.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRoi.width / scale; scaledIIRoi.height = srcIIRoi.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; NCV_SKIP_COND_BEGIN nppStat = nppiStDecimate_32u_C1R( integral.ptr(), integral.pitch(), d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStDecimate_64u_C1R( d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); const NcvRect32u rect( HAAR_STDDEV_BORDER, HAAR_STDDEV_BORDER, haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER, haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER); nppStat = nppiStRectStdDev_32f_C1R( d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), d_rectStdDev.ptr(), d_rectStdDev.pitch(), NcvSize32u(searchRoi.width, searchRoi.height), rect, (Ncv32f)scale*scale, true); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END Ncv32u detectionsOnThisScale; ncvStat = ncvApplyHaarClassifierCascade_device( d_scaledIntegralImage, d_rectStdDev, d_pixelMask, detectionsOnThisScale, haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false, searchRoi, pixelStep, (Ncv32f)scale*scale, gpuAllocator, cpuAllocator, devProp, cuStream); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_BEGIN NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment()); ncvStat = ncvGrowDetectionsVector_device( d_vecPixelMask, detectionsOnThisScale, d_hypothesesIntermediate, dstNumRects, static_cast<Ncv32u>(d_hypothesesIntermediate.length()), haar.ClassifierSize.width, haar.ClassifierSize.height, (Ncv32f)scale, cuStream); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); if 
(flags & NCVPipeObjDet_FindLargestObject) { if (dstNumRects == 0) { continue; } if (dstNumRects != 0) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } Ncv32u numStrongHypothesesNow = dstNumRects; ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, numStrongHypothesesNow, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (numStrongHypothesesNow > 0) { NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0]; for (Ncv32u j=1; j<numStrongHypothesesNow; j++) { if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width) { maxRect = h_hypothesesIntermediate.ptr()[j]; } } h_hypothesesIntermediate.ptr()[0] = maxRect; dstNumRects = 1; ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); bFoundLargestFace = true; break; } } NCV_SKIP_COND_END if (gpuAllocator.isCounting()) { break; } } NCVStatus ncvRetCode = NCV_SUCCESS; NCV_SKIP_COND_BEGIN if (flags & NCVPipeObjDet_FindLargestObject) { if (!bFoundLargestFace) { dstNumRects = 0; } } else { //TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left) if (dstNumRects != 0) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, dstNumRects, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (dstNumRects > d_dstRects.length()) { ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; dstNumRects = static_cast<Ncv32u>(d_dstRects.length()); } if (dstNumRects != 0) { ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); } } if (flags & NCVPipeObjDet_VisualizeInPlace) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(), d_srcImg.width(), d_srcImg.height(), d_dstRects.ptr(), dstNumRects, 255, cuStream); } NCV_SKIP_COND_END return ncvRetCode; } //============================================================================== // // Purely Host code: classifier IO, mock-ups // //============================================================================== #ifdef _SELF_TEST_ #include <float.h> #endif NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage, NCVMatrix<Ncv32f> &h_weights, NCVMatrixAlloc<Ncv32u> &h_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea) { ncvAssertReturn(h_integralImage.memType() == h_weights.memType() && h_integralImage.memType() == h_pixelMask.memType() && (h_integralImage.memType() == NCVMemoryTypeHostPageable || h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() && h_HaarStages.memType() == h_HaarFeatures.memType() && 
(h_HaarStages.memType() == NCVMemoryTypeHostPageable || h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height && h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height && h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width && h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(h_HaarStages.length() >= haar.NumStages && h_HaarNodes.length() >= haar.NumClassifierTotalNodes && h_HaarFeatures.length() >= haar.NumFeatures && h_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); for (Ncv32u i=0; i<anchorsRoi.height; i++) { for (Ncv32u j=0; j<h_pixelMask.stride(); j++) { if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width) { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; } else { for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++) { Ncv32f curStageSum = 0.0f; Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset(); if (iStage == 0) { if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } else { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j); } } else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u curNodeOffset = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset]; HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures(); Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect]; Ncv32u rectX, rectY, rectWidth, rectHeight; feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight); Ncv32f rectWeight = feature.getWeight(); Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride(); Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL]; Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR]; Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL]; Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR]; Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR; curNodeVal += (Ncv32f)rectSum * rectWeight; } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); 
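// The accumulated feature response (curNodeVal) is compared below against the node
// threshold scaled by the per-window normalization term (scaleAreaPixels * the
// precomputed per-anchor weight, i.e. the window standard deviation): a response under
// the scaled threshold routes traversal to the left descriptor, otherwise to the right
// one. A leaf descriptor terminates the tree walk and adds its value to the running
// stage sum.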
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { curNodeOffset = nextNodeDescriptor.getNextNodeOffset(); } } curRootNodeOffset++; } Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold(); if (curStageSum < tmpStageThreshold) { //drop h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } } } } std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride()); Ncv32u i = 0; for (; i<anchorsRoi.height * h_pixelMask.stride(); i++) { if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } } numDetections = i; return NCV_SUCCESS; } NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } for (Ncv32u i=0; i<numDetsToCopy; i++) { hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale); } totalDetections += numDetsToCopy; return ncvStat; } static NCVStatus loadFromXML(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { #ifndef HAVE_OPENCV_OBJDETECT (void) filename; (void) haar; (void) haarStages; (void) haarClassifierNodes; (void) haarFeatures; CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module"); return NCV_HAAR_XML_LOADING_EXCEPTION; #else NCVStatus ncvStat; haar.NumStages = 0; haar.NumClassifierRootNodes = 0; haar.NumClassifierTotalNodes = 0; haar.NumFeatures = 0; haar.ClassifierSize.width = 0; haar.ClassifierSize.height = 0; haar.bHasStumpsOnly = true; haar.bNeedsTiltedII = false; Ncv32u curMaxTreeDepth = 0; std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes; haarStages.resize(0); haarClassifierNodes.resize(0); haarFeatures.resize(0); cv::Ptr<CvHaarClassifierCascade> oldCascade((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0)); if (!oldCascade) { return NCV_HAAR_XML_LOADING_EXCEPTION; } 
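// The legacy CvHaarClassifierCascade is flattened here into three parallel arrays:
// one HaarStage64 per stage, one HaarClassifierNode128 per decision node and one
// HaarFeature64 per rectangle. Root nodes of every tree are appended directly to
// haarClassifierNodes; non-root nodes are parked in h_TmpClassifierNotRootNodes and
// merged in behind the roots afterwards, with their child offsets rebased by the
// number of root nodes (offsetRoot).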
haar.ClassifierSize.width = oldCascade->orig_window_size.width; haar.ClassifierSize.height = oldCascade->orig_window_size.height; int stagesCount = oldCascade->count; for(int s = 0; s < stagesCount; ++s) // by stages { HaarStage64 curStage; curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size())); curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold); int treesCount = oldCascade->stage_classifier[s].count; for(int t = 0; t < treesCount; ++t) // by trees { Ncv32u nodeId = 0; CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t]; int nodesCount = tree->count; for(int n = 0; n < nodesCount; ++n) //by features { CvHaarFeature* feature = &tree->haar_feature[n]; HaarClassifierNode128 curNode; curNode.setThreshold(tree->threshold[n]); NcvBool bIsLeftNodeLeaf = false; NcvBool bIsRightNodeLeaf = false; HaarClassifierNodeDescriptor32 nodeLeft; if ( tree->left[n] <= 0 ) { Ncv32f leftVal = tree->alpha[-tree->left[n]]; ncvStat = nodeLeft.create(leftVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsLeftNodeLeaf = true; } else { Ncv32u leftNodeOffset = tree->left[n]; nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight; if ( tree->right[n] <= 0 ) { Ncv32f rightVal = tree->alpha[-tree->right[n]]; ncvStat = nodeRight.create(rightVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsRightNodeLeaf = true; } else { Ncv32u rightNodeOffset = tree->right[n]; nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setRightNodeDesc(nodeRight); Ncv32u tiltedVal = feature->tilted; haar.bNeedsTiltedII = (tiltedVal != 0); Ncv32u featureId = 0; for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects { Ncv32u rectX = feature->rect[l].r.x; Ncv32u rectY = feature->rect[l].r.y; Ncv32u rectWidth = feature->rect[l].r.width; Ncv32u rectHeight = feature->rect[l].r.height; Ncv32f rectWeight = feature->rect[l].weight; if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/) break; HaarFeature64 curFeature; ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height); curFeature.setWeight(rectWeight); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); haarFeatures.push_back(curFeature); featureId++; } HaarFeatureDescriptor32 tmpFeatureDesc; ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf, featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); curNode.setFeatureDesc(tmpFeatureDesc); if (!nodeId) { //root node haarClassifierNodes.push_back(curNode); curMaxTreeDepth = 1; } else { //other node h_TmpClassifierNotRootNodes.push_back(curNode); curMaxTreeDepth++; } nodeId++; } } curStage.setNumClassifierRootNodes(treesCount); haarStages.push_back(curStage); } //fill in cascade stats haar.NumStages = static_cast<Ncv32u>(haarStages.size()); haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size()); haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size()); haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size()); //merge root and leaf nodes in one classifiers array Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size()); for (Ncv32u i=0; i<haarClassifierNodes.size(); i++) { 
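// Child offsets of the root nodes were written relative to h_TmpClassifierNotRootNodes;
// once that array is appended after the roots, every non-leaf descriptor has to be
// shifted by offsetRoot so it indexes the merged node array.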
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } haarClassifierNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } haarClassifierNodes[i].setRightNodeDesc(nodeRight); } for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++) { HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight); haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]); } return NCV_SUCCESS; #endif } #define NVBIN_HAAR_SIZERESERVED 16 #define NVBIN_HAAR_VERSION 0x1 static NCVStatus loadFromNVBIN(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { size_t readCount; FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); Ncv32u fsize; readCount = fread(&fsize, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fseek(fp, 0, SEEK_END); Ncv32u fsizeActual = ftell(fp); ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR); std::vector<unsigned char> fdata; fdata.resize(fsize); Ncv32u dataOffset = 0; fseek(fp, 0, SEEK_SET); readCount = fread(&fdata[0], fsize, 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); //data dataOffset = NVBIN_HAAR_SIZERESERVED; haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvSize32u); haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haarStages.resize(haar.NumStages); haarClassifierNodes.resize(haar.NumClassifierTotalNodes); haarFeatures.resize(haar.NumFeatures); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages); dataOffset += szStages; 
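// The classifier-node and feature blobs follow immediately after the stage blob,
// in the same order in which ncvHaarStoreNVBIN_host() writes them out below.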
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers); dataOffset += szClassifiers; memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures); dataOffset += szFeatures; return NCV_SUCCESS; } NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages, Ncv32u &numNodes, Ncv32u &numFeatures) { size_t readCount; NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); if (fext == "nvbin") { FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET); Ncv32u tmp; readCount = fread(&numStages, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&tmp, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); } else if (fext == "xml") { HaarClassifierCascadeDescriptor haar; std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); numStages = haar.NumStages; numNodes = haar.NumClassifierTotalNodes; numFeatures = haar.NumFeatures; } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } return NCV_SUCCESS; } NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; if (fext == "nvbin") { ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else if (fext == "xml") { ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY); memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64)); memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128)); memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64)); return NCV_SUCCESS; } NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename, HaarClassifierCascadeDescriptor haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> 
                                   &h_HaarFeatures)
{
    ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
    ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
    ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
    ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
                    h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
                    h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);

    Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
    Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
    Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);

    Ncv32u dataOffset = 0;
    std::vector<unsigned char> fdata;
    fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);

    //header
    *(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;

    //data
    dataOffset = NVBIN_HAAR_SIZERESERVED;
    *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;               dataOffset += sizeof(Ncv32u);
    *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;  dataOffset += sizeof(Ncv32u);
    *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes; dataOffset += sizeof(Ncv32u);
    *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;             dataOffset += sizeof(Ncv32u);
    *(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;      dataOffset += sizeof(NcvSize32u);
    *(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;         dataOffset += sizeof(NcvBool);
    *(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;         dataOffset += sizeof(NcvBool);
    memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);       dataOffset += szStages;
    memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);   dataOffset += szClassifiers;
    memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);   dataOffset += szFeatures;
    Ncv32u fsize = dataOffset;

    //TODO: CRC32 here

    //update header
    dataOffset = sizeof(Ncv32u);
    *(Ncv32u *)(&fdata[0]+dataOffset) = fsize;

    FILE *fp = fopen(filename.c_str(), "wb");
    ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
    fwrite(&fdata[0], fsize, 1, fp);
    fclose(fp);
    return NCV_SUCCESS;
}
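//==============================================================================
//
// Illustrative sketch (assumptions noted): a minimal host-side conversion of an
// OpenCV XML cascade into the NVBIN blob that loadFromNVBIN() reads back. Only
// ncvHaarGetClassifierSize(), ncvHaarLoadFromFile_host() and
// ncvHaarStoreNVBIN_host() above are real entry points of this file; the helper
// name below and the allocator choice are assumptions — any INCVMemAllocator
// that hands out NCVMemoryTypeHostPinned memory satisfies the residence checks
// of the loader and the writer. XML parsing additionally requires
// HAVE_OPENCV_OBJDETECT (see loadFromXML() above).
//
//==============================================================================

static NCVStatus convertXmlCascadeToNvbinSketch(const cv::String &xmlPath,
                                                const cv::String &nvbinPath,
                                                INCVMemAllocator &pinnedAllocator)
{
    //query the cascade dimensions first so the host vectors can be sized exactly
    Ncv32u numStages = 0, numNodes = 0, numFeatures = 0;
    NCVStatus ncvStat = ncvHaarGetClassifierSize(xmlPath, numStages, numNodes, numFeatures);
    ncvAssertReturnNcvStat(ncvStat);

    //pinned host vectors sized from the counts reported by the parser
    HaarClassifierCascadeDescriptor haar;
    NCVVectorAlloc<HaarStage64> h_stages(pinnedAllocator, numStages);
    NCVVectorAlloc<HaarClassifierNode128> h_nodes(pinnedAllocator, numNodes);
    NCVVectorAlloc<HaarFeature64> h_features(pinnedAllocator, numFeatures);
    ncvAssertReturn(h_stages.isMemAllocated() && h_nodes.isMemAllocated() &&
                    h_features.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);

    //parse the XML cascade into the flattened stage/node/feature representation
    ncvStat = ncvHaarLoadFromFile_host(xmlPath, haar, h_stages, h_nodes, h_features);
    ncvAssertReturnNcvStat(ncvStat);

    //serialize the flattened representation; loadFromNVBIN() reads the same layout back
    return ncvHaarStoreNVBIN_host(nvbinPath, haar, h_stages, h_nodes, h_features);
}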
5fa502e7f620e6414682235e16bbc19121d8e7f1.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Viola-Jones Object Detection Framework // // The algorithm and code are explained in the upcoming GPU Computing Gems // chapter in detail: // // Anton Obukhov, "Haar Classifiers for Object Detection with CUDA" // PDF URL placeholder // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov. 
// //////////////////////////////////////////////////////////////////////////////// #include <algorithm> #include <cstdio> #include "opencv2/core/cuda/warp.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_OBJDETECT # include "opencv2/objdetect.hpp" # include "opencv2/objdetect/objdetect_c.h" #endif #include "opencv2/cudalegacy/NCV.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp" #include "NCVRuntimeTemplates.hpp" #include "NCVAlg.hpp" //============================================================================== // // BlockScan file // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 __device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data) { #if __CUDA_ARCH__ >= 300 const unsigned int laneId = cv::cuda::device::Warp::laneId(); // scan on shuffl functions #pragma unroll for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2) { const Ncv32u n = cv::cuda::device::shfl_up(idata, i); if (laneId >= i) idata += n; } return idata; #else Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; #endif } __device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <Ncv32u tiNumScanThreads> __device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan Ncv32u warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements Ncv32u val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // HaarClassifierCascade file // //============================================================================== const Ncv32u MAX_GRID_DIM = 65535; const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64; #define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6 #define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2) /** \internal * Haar features solid array. */ texture<uint2, 1, cudaReadModeElementType> texHaarFeatures; /** \internal * Haar classifiers flattened trees container. * Two parts: first contains root nodes, second - nodes that are referred by root nodes. 
* Drawback: breaks tree locality (might cause more cache misses * Advantage: No need to introduce additional 32-bit field to index root nodes offsets */ texture<uint4, 1, cudaReadModeElementType> texHaarClassifierNodes; texture<Ncv32u, 1, cudaReadModeElementType> texIImage; __device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages) { return d_Stages[iStage]; } template <NcvBool tbCacheTextureCascade> __device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes) { HaarClassifierNode128 tmpNode; if (tbCacheTextureCascade) { tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode); } else { tmpNode = d_ClassifierNodes[iNode]; } return tmpNode; } template <NcvBool tbCacheTextureCascade> __device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features, Ncv32f *weight, Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight) { HaarFeature64 feature; if (tbCacheTextureCascade) { feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature); } else { feature = d_Features[iFeature]; } feature.getRect(rectX, rectY, rectWidth, rectHeight); *weight = feature.getWeight(); } template <NcvBool tbCacheTextureIImg> __device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg) { if (tbCacheTextureIImg) { return tex1Dfetch(texIImage, x); } else { return d_IImg[x]; } } __device__ Ncv32u d_outMaskPosition; __device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 __shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2]; __shared__ Ncv32u numPassed; __shared__ Ncv32u outMaskOffset; Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem); __syncthreads(); if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1) { numPassed = incScan; outMaskOffset = atomicAdd(&d_outMaskPosition, incScan); } if (threadPassFlag) { Ncv32u excScan = incScan - threadPassFlag; shmem[excScan] = threadElem; } __syncthreads(); if (threadIdx.x < numPassed) { vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x]; } #endif } template <NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u y_offs; Ncv32u x_offs; Ncv32u maskOffset; Ncv32u outMaskVal; NcvBool bInactiveThread = false; if (tbReadPixelIndexFromVector) { maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (maskOffset >= mask1Dlen) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { outMaskVal = d_inMask[maskOffset]; y_offs = outMaskVal >> 16; x_offs = outMaskVal & 0xFFFF; } } else { y_offs = blockIdx.y; x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (x_offs >= mask2Dstride) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { maskOffset = y_offs * mask2Dstride + x_offs; if ((x_offs >= anchorsRoi.width) || (!tbInitMaskPositively && d_inMask != d_outMask && d_inMask[maskOffset] == 
OBJDET_MASK_ELEMENT_INVALID_32U)) { if (tbDoAtomicCompaction) { bInactiveThread = true; } else { d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U; return; } } outMaskVal = (y_offs << 16) | x_offs; } } NcvBool bPass = true; if (!tbDoAtomicCompaction || tbDoAtomicCompaction) { Ncv32f pixelStdDev = 0.0f; if (!bInactiveThread) pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++) { Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset(); Ncv32f stageThreshold = curStage.getStageThreshold(); while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u iNode = curRootNodeOffset; if (bPass && !bInactiveThread) { while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset++; } if (curStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; } } } __syncthreads(); if (!tbDoAtomicCompaction) { if (!tbReadPixelIndexFromVector || (tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask))) { d_outMask[maskOffset] = outMaskVal; } } else { compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread, outMaskVal, d_outMask); } } template <NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u 
mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x; if (maskOffset >= mask1Dlen) { return; } Ncv32u outMaskVal = d_inMask[maskOffset]; Ncv32u y_offs = outMaskVal >> 16; Ncv32u x_offs = outMaskVal & 0xFFFF; Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; NcvBool bPass = true; for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++) { //this variable is subject to reduction Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x; Ncv32f stageThreshold = curStage.getStageThreshold(); Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2; for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++) { NcvBool bMoreNodesToTraverse = true; if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage) { Ncv32u iNode = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; //TODO: fetch into shmem if size suffices. Shmem can be shared with reduce for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL; } Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum); if (finalStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } if (!tbDoAtomicCompaction) { if (!bPass || d_inMask != d_outMask) { if (!threadIdx.x) { d_outMask[maskOffset] = outMaskVal; } } } else { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 if 
(bPass && !threadIdx.x) { Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1); d_outMask[outMaskOffset] = outMaskVal; } #endif } } template <NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction> __global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { Ncv32u y_offs = blockIdx.y; Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs; Ncv32u y_offs_upsc = step * y_offs; Ncv32u x_offs_upsc = step * x_offs; Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc; Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U; if (x_offs_upsc < anchorsRoi.width && (!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U)) { outElem = (y_offs_upsc << 16) | x_offs_upsc; } if (!tbDoAtomicCompaction) { d_outMask[outMaskOffset] = outElem; } else { compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U, outElem, d_outMask); } } struct applyHaarClassifierAnchorParallelFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; applyHaarClassifierAnchorParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value, Loki::TL::TypeAt<TList, 3>::Result::value, Loki::TL::TypeAt<TList, 4>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u 
endStageExc, Ncv32f scaleArea) { applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor> ::call( &functor, tbInitMaskPositively, tbCacheTextureIImg, tbCacheTextureCascade, tbReadPixelIndexFromVector, tbDoAtomicCompaction); } struct applyHaarClassifierClassifierParallelFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { (void)tl; applyHaarClassifierClassifierParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor> ::call( &functor, tbCacheTextureIImg, tbCacheTextureCascade, tbDoAtomicCompaction); } struct initializeMaskVectorFunctor { 
dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u step; //Arguments are passed through the constructor initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _step) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), step(_step) {} template<class TList> void call(TList tl) { (void)tl; initializeMaskVector < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); } }; void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor> ::call( &functor, tbMaskByInmask, tbDoAtomicCompaction); } Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages) { Ncv32u i = 0; for (; i<haar.NumStages; i++) { if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N) { break; } } return i; } NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral, NCVMatrix<Ncv32f> &d_weights, NCVMatrixAlloc<Ncv32u> &d_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea, INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, cudaDeviceProp &devProp, cudaStream_t cuStream) { ncvAssertReturn(integral.memType() == d_weights.memType()&& integral.memType() == d_pixelMask.memType() && integral.memType() == gpuAllocator.memType() && (integral.memType() == NCVMemoryTypeDevice || integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height && d_weights.width() >= anchorsRoi.width && d_weights.height() >= 
anchorsRoi.height && integral.width() >= anchorsRoi.width + haar.ClassifierSize.width && integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); #if defined _SELF_TEST_ NCVStatus ncvStat; NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch); ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch); ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length); ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length); ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN ncvStat = d_pixelMask.copySolid(h_pixelMask, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = integral.copySolid(h_integralImage, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_weights.copySolid(h_weights, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR); for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++) { for (Ncv32u j=0; j<d_pixelMask.stride(); j++) { if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width)) { if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U) { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j; } } else { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U; } } } NCV_SKIP_COND_END #endif NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride()); ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE); NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length())); ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2); ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); Ncv32u *hp_zero = &hp_pool32u.ptr()[0]; Ncv32u *hp_numDet = &hp_pool32u.ptr()[1]; NCV_SKIP_COND_BEGIN *hp_zero = 0; *hp_numDet = 0; NCV_SKIP_COND_END Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 
2*HAAR_STDDEV_BORDER)); NcvBool bTexCacheCascade = devProp.major < 2; NcvBool bTexCacheIImg = true; //this works better even on Fermi so far NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3); NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask; NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp; Ncv32u szNppCompactTmpBuf; nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp); if (bDoAtomicCompaction) { szNppCompactTmpBuf = 0; } NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf); NCV_SKIP_COND_BEGIN if (bTexCacheIImg) { cudaChannelFormatDesc cfdTexIImage; cfdTexIImage = cudaCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage, (anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } if (bTexCacheCascade) { cudaChannelFormatDesc cfdTexHaarFeatures; cudaChannelFormatDesc cfdTexHaarClassifierNodes; cfdTexHaarFeatures = cudaCreateChannelDesc<uint2>(); cfdTexHaarClassifierNodes = cudaCreateChannelDesc<uint4>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarFeatures, d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarClassifierNodes, d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } Ncv32u stageStartAnchorParallel = 0; Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL, haar, h_HaarStages); Ncv32u stageEndClassifierParallel = haar.NumStages; if (stageMiddleSwitch == 0) { stageMiddleSwitch = 1; } //create stages subdivision for pixel-parallel processing const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 
7 : 1; Ncv32u curStop = stageStartAnchorParallel; std::vector<Ncv32u> pixParallelStageStops; while (curStop < stageMiddleSwitch) { pixParallelStageStops.push_back(curStop); curStop += compactEveryNstage; } if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2) { pixParallelStageStops[pixParallelStageStops.size()-1] = (stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2; } pixParallelStageStops.push_back(stageMiddleSwitch); Ncv32u pixParallelStageStopsIndex = 0; if (pixelStep != 1 || bMaskElements) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), (anchorsRoi.height + pixelStep - 1) / pixelStep); dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL); if (gridInit.x == 0 || gridInit.y == 0) { numDetections = 0; return NCV_SUCCESS; } initializeMaskVectorDynTemplate(bMaskElements, bDoAtomicCompaction, gridInit, blockInit, cuStream, d_ptrNowData->ptr(), d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(), anchorsRoi, pixelStep); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); swap(d_ptrNowData, d_ptrNowTmp); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR); } numDetections = *hp_numDet; } else { // // 1. Run the first pixel-input pixel-parallel classifier for few stages // if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), anchorsRoi.height); dim3 block1(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( true, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid1, block1, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), 0, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; pixParallelStageStopsIndex++; } // // 2. Run pixel-parallel stages // for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++) { if (numDetections == 0) { break; } if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL); if (numDetections > MAX_GRID_DIM) { grid2.x = MAX_GRID_DIM; grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block2(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( false, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid2, block2, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } // // 3. 
Run all left stages in one stage-parallel kernel // if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid3(numDetections); if (numDetections > MAX_GRID_DIM) { grid3.x = MAX_GRID_DIM; grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL); applyHaarClassifierClassifierParallelDynTemplate( bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade bDoAtomicCompaction, //tbDoAtomicCompaction grid3, block3, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, stageMiddleSwitch, stageEndClassifierParallel, scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } if (d_ptrNowData != &d_vecPixelMask) { d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } #if defined _SELF_TEST_ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections); } Ncv32u fpu_oldcw, fpu_cw; _controlfp_s(&fpu_cw, 0, 0); fpu_oldcw = fpu_cw; _controlfp_s(&fpu_cw, _PC_24, _MCW_PC); Ncv32u numDetGold; ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, bMaskElements, anchorsRoi, pixelStep, scaleArea); ncvAssertReturnNcvStat(ncvStat); _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC); bool bPass = true; if (numDetGold != numDetections) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections); bPass = false; } else { for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++) { if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i]) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]); bPass = false; } } } printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED"); #endif NCV_SKIP_COND_END return NCV_SUCCESS; } //============================================================================== // // HypothesesOperations file // //============================================================================== const Ncv32u NUM_GROW_THREADS = 128; __device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u 
width, Ncv32u height, Ncv32f scale) { NcvRect32u res; res.x = (Ncv32u)(scale * (pixel & 0xFFFF)); res.y = (Ncv32u)(scale * (pixel >> 16)); res.width = (Ncv32u)(scale * width); res.height = (Ncv32u)(scale * height); return res; } __global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements, NcvRect32u *hypotheses, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x; if (elemAddr >= numElements) { return; } hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale); } NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale, cudaStream_t cuStream) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } dim3 block(NUM_GROW_THREADS); dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } growDetectionsKernel<<<grid, block, 0, cuStream>>>(pixelMask.ptr(), numDetsToCopy, hypotheses.ptr() + totalDetections, rectWidth, rectHeight, curScale); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); totalDetections += numDetsToCopy; return ncvStat; } //============================================================================== // // Pipeline file // //============================================================================== NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg, NcvSize32u srcRoi, NCVVector<NcvRect32u> &d_dstRects, Ncv32u &dstNumRects, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvSize32u minObjSize, Ncv32u minNeighbors, //default 4 Ncv32f scaleStep, //default 1.2f Ncv32u pixelStep, //default 1 Ncv32u flags, //default NCVPipeObjDet_Default INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, cudaDeviceProp &devProp, cudaStream_t cuStream) { ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() && d_srcImg.memType() == gpuAllocator.memType() && (d_srcImg.memType() == NCVMemoryTypeDevice || d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); 
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 && d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height && srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height && d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); //TODO: set NPP active stream to cuStream NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); Ncv32u integralWidth = d_srcImg.width() + 1; Ncv32u integralHeight = d_srcImg.height() + 1; NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVStatus nppStat; Ncv32u szTmpBufIntegral, szTmpBufSqIntegral; nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral)); ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), integral.ptr(), integral.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), 
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END dstNumRects = 0; Ncv32u lastCheckedScale = 0; NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0); std::vector<Ncv32u> scalesVector; NcvBool bFoundLargestFace = false; for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep) { Ncv32u scale = (Ncv32u)scaleIter; if (lastCheckedScale == scale) { continue; } lastCheckedScale = scale; if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width || haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height) { continue; } NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRo_i.width = srcRoi_.width + 1; srcIIRo_i.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRo_i.width / scale; scaledIIRoi.height = srcIIRo_i.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; if (searchRoi.width <= 0 || searchRoi.height <= 0) { break; } scalesVector.push_back(scale); if (gpuAllocator.isCounting()) { break; } } if (bReverseTraverseScale) { std::reverse(scalesVector.begin(), scalesVector.end()); } //TODO: handle _fair_scale_ flag for (Ncv32u i=0; i<scalesVector.size(); i++) { Ncv32u scale = scalesVector[i]; NcvSize32u srcRoi_, scaledIIRoi, searchRoi; NcvSize32u srcIIRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRoi.width = srcRoi_.width + 1; srcIIRoi.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRoi.width / scale; scaledIIRoi.height = srcIIRoi.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; NCV_SKIP_COND_BEGIN nppStat = nppiStDecimate_32u_C1R( integral.ptr(), integral.pitch(), d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStDecimate_64u_C1R( d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); const NcvRect32u rect( HAAR_STDDEV_BORDER, HAAR_STDDEV_BORDER, haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER, haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER); nppStat = nppiStRectStdDev_32f_C1R( d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), d_rectStdDev.ptr(), d_rectStdDev.pitch(), NcvSize32u(searchRoi.width, searchRoi.height), rect, (Ncv32f)scale*scale, true); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END Ncv32u detectionsOnThisScale; ncvStat = ncvApplyHaarClassifierCascade_device( d_scaledIntegralImage, d_rectStdDev, d_pixelMask, detectionsOnThisScale, haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false, searchRoi, pixelStep, (Ncv32f)scale*scale, gpuAllocator, cpuAllocator, devProp, cuStream); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_BEGIN NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment()); ncvStat = ncvGrowDetectionsVector_device( d_vecPixelMask, detectionsOnThisScale, d_hypothesesIntermediate, dstNumRects, static_cast<Ncv32u>(d_hypothesesIntermediate.length()), haar.ClassifierSize.width, haar.ClassifierSize.height, (Ncv32f)scale, cuStream); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); if 
(flags & NCVPipeObjDet_FindLargestObject) { if (dstNumRects == 0) { continue; } if (dstNumRects != 0) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } Ncv32u numStrongHypothesesNow = dstNumRects; ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, numStrongHypothesesNow, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (numStrongHypothesesNow > 0) { NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0]; for (Ncv32u j=1; j<numStrongHypothesesNow; j++) { if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width) { maxRect = h_hypothesesIntermediate.ptr()[j]; } } h_hypothesesIntermediate.ptr()[0] = maxRect; dstNumRects = 1; ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); bFoundLargestFace = true; break; } } NCV_SKIP_COND_END if (gpuAllocator.isCounting()) { break; } } NCVStatus ncvRetCode = NCV_SUCCESS; NCV_SKIP_COND_BEGIN if (flags & NCVPipeObjDet_FindLargestObject) { if (!bFoundLargestFace) { dstNumRects = 0; } } else { //TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left) if (dstNumRects != 0) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, dstNumRects, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (dstNumRects > d_dstRects.length()) { ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; dstNumRects = static_cast<Ncv32u>(d_dstRects.length()); } if (dstNumRects != 0) { ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); } } if (flags & NCVPipeObjDet_VisualizeInPlace) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(), d_srcImg.width(), d_srcImg.height(), d_dstRects.ptr(), dstNumRects, 255, cuStream); } NCV_SKIP_COND_END return ncvRetCode; } //============================================================================== // // Purely Host code: classifier IO, mock-ups // //============================================================================== #ifdef _SELF_TEST_ #include <float.h> #endif NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage, NCVMatrix<Ncv32f> &h_weights, NCVMatrixAlloc<Ncv32u> &h_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea) { ncvAssertReturn(h_integralImage.memType() == h_weights.memType() && h_integralImage.memType() == h_pixelMask.memType() && (h_integralImage.memType() == NCVMemoryTypeHostPageable || h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() && h_HaarStages.memType() == h_HaarFeatures.memType() && 
(h_HaarStages.memType() == NCVMemoryTypeHostPageable || h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height && h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height && h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width && h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(h_HaarStages.length() >= haar.NumStages && h_HaarNodes.length() >= haar.NumClassifierTotalNodes && h_HaarFeatures.length() >= haar.NumFeatures && h_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); for (Ncv32u i=0; i<anchorsRoi.height; i++) { for (Ncv32u j=0; j<h_pixelMask.stride(); j++) { if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width) { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; } else { for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++) { Ncv32f curStageSum = 0.0f; Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset(); if (iStage == 0) { if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } else { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j); } } else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u curNodeOffset = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset]; HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures(); Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect]; Ncv32u rectX, rectY, rectWidth, rectHeight; feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight); Ncv32f rectWeight = feature.getWeight(); Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride(); Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL]; Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR]; Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL]; Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR]; Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR; curNodeVal += (Ncv32f)rectSum * rectWeight; } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); 
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { curNodeOffset = nextNodeDescriptor.getNextNodeOffset(); } } curRootNodeOffset++; } Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold(); if (curStageSum < tmpStageThreshold) { //drop h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } } } } std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride()); Ncv32u i = 0; for (; i<anchorsRoi.height * h_pixelMask.stride(); i++) { if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } } numDetections = i; return NCV_SUCCESS; } NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalMaxDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } for (Ncv32u i=0; i<numDetsToCopy; i++) { hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale); } totalDetections += numDetsToCopy; return ncvStat; } static NCVStatus loadFromXML(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { #ifndef HAVE_OPENCV_OBJDETECT (void) filename; (void) haar; (void) haarStages; (void) haarClassifierNodes; (void) haarFeatures; CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module"); return NCV_HAAR_XML_LOADING_EXCEPTION; #else NCVStatus ncvStat; haar.NumStages = 0; haar.NumClassifierRootNodes = 0; haar.NumClassifierTotalNodes = 0; haar.NumFeatures = 0; haar.ClassifierSize.width = 0; haar.ClassifierSize.height = 0; haar.bHasStumpsOnly = true; haar.bNeedsTiltedII = false; Ncv32u curMaxTreeDepth = 0; std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes; haarStages.resize(0); haarClassifierNodes.resize(0); haarFeatures.resize(0); cv::Ptr<CvHaarClassifierCascade> oldCascade((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0)); if (!oldCascade) { return NCV_HAAR_XML_LOADING_EXCEPTION; } 
haar.ClassifierSize.width = oldCascade->orig_window_size.width; haar.ClassifierSize.height = oldCascade->orig_window_size.height; int stagesCount = oldCascade->count; for(int s = 0; s < stagesCount; ++s) // by stages { HaarStage64 curStage; curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size())); curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold); int treesCount = oldCascade->stage_classifier[s].count; for(int t = 0; t < treesCount; ++t) // by trees { Ncv32u nodeId = 0; CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t]; int nodesCount = tree->count; for(int n = 0; n < nodesCount; ++n) //by features { CvHaarFeature* feature = &tree->haar_feature[n]; HaarClassifierNode128 curNode; curNode.setThreshold(tree->threshold[n]); NcvBool bIsLeftNodeLeaf = false; NcvBool bIsRightNodeLeaf = false; HaarClassifierNodeDescriptor32 nodeLeft; if ( tree->left[n] <= 0 ) { Ncv32f leftVal = tree->alpha[-tree->left[n]]; ncvStat = nodeLeft.create(leftVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsLeftNodeLeaf = true; } else { Ncv32u leftNodeOffset = tree->left[n]; nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight; if ( tree->right[n] <= 0 ) { Ncv32f rightVal = tree->alpha[-tree->right[n]]; ncvStat = nodeRight.create(rightVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsRightNodeLeaf = true; } else { Ncv32u rightNodeOffset = tree->right[n]; nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setRightNodeDesc(nodeRight); Ncv32u tiltedVal = feature->tilted; haar.bNeedsTiltedII = (tiltedVal != 0); Ncv32u featureId = 0; for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects { Ncv32u rectX = feature->rect[l].r.x; Ncv32u rectY = feature->rect[l].r.y; Ncv32u rectWidth = feature->rect[l].r.width; Ncv32u rectHeight = feature->rect[l].r.height; Ncv32f rectWeight = feature->rect[l].weight; if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/) break; HaarFeature64 curFeature; ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height); curFeature.setWeight(rectWeight); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); haarFeatures.push_back(curFeature); featureId++; } HaarFeatureDescriptor32 tmpFeatureDesc; ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf, featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); curNode.setFeatureDesc(tmpFeatureDesc); if (!nodeId) { //root node haarClassifierNodes.push_back(curNode); curMaxTreeDepth = 1; } else { //other node h_TmpClassifierNotRootNodes.push_back(curNode); curMaxTreeDepth++; } nodeId++; } } curStage.setNumClassifierRootNodes(treesCount); haarStages.push_back(curStage); } //fill in cascade stats haar.NumStages = static_cast<Ncv32u>(haarStages.size()); haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size()); haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size()); haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size()); //merge root and leaf nodes in one classifiers array Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size()); for (Ncv32u i=0; i<haarClassifierNodes.size(); i++) { 
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } haarClassifierNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } haarClassifierNodes[i].setRightNodeDesc(nodeRight); } for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++) { HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight); haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]); } return NCV_SUCCESS; #endif } #define NVBIN_HAAR_SIZERESERVED 16 #define NVBIN_HAAR_VERSION 0x1 static NCVStatus loadFromNVBIN(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { size_t readCount; FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); Ncv32u fsize; readCount = fread(&fsize, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fseek(fp, 0, SEEK_END); Ncv32u fsizeActual = ftell(fp); ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR); std::vector<unsigned char> fdata; fdata.resize(fsize); Ncv32u dataOffset = 0; fseek(fp, 0, SEEK_SET); readCount = fread(&fdata[0], fsize, 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); //data dataOffset = NVBIN_HAAR_SIZERESERVED; haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvSize32u); haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haarStages.resize(haar.NumStages); haarClassifierNodes.resize(haar.NumClassifierTotalNodes); haarFeatures.resize(haar.NumFeatures); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages); dataOffset += szStages; 
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers); dataOffset += szClassifiers; memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures); dataOffset += szFeatures; return NCV_SUCCESS; } NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages, Ncv32u &numNodes, Ncv32u &numFeatures) { size_t readCount; NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); if (fext == "nvbin") { FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET); Ncv32u tmp; readCount = fread(&numStages, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&tmp, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); } else if (fext == "xml") { HaarClassifierCascadeDescriptor haar; std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); numStages = haar.NumStages; numNodes = haar.NumClassifierTotalNodes; numFeatures = haar.NumFeatures; } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } return NCV_SUCCESS; } NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; if (fext == "nvbin") { ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else if (fext == "xml") { ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY); memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64)); memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128)); memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64)); return NCV_SUCCESS; } NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename, HaarClassifierCascadeDescriptor haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> 
&h_HaarFeatures) { ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); Ncv32u dataOffset = 0; std::vector<unsigned char> fdata; fdata.resize(szStages+szClassifiers+szFeatures+1024, 0); //header *(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION; //data dataOffset = NVBIN_HAAR_SIZERESERVED; *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures; dataOffset += sizeof(Ncv32u); *(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize; dataOffset += sizeof(NcvSize32u); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII; dataOffset += sizeof(NcvBool); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly; dataOffset += sizeof(NcvBool); memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages); dataOffset += szStages; memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers); dataOffset += szClassifiers; memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures); dataOffset += szFeatures; Ncv32u fsize = dataOffset; //TODO: CRC32 here //update header dataOffset = sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = fsize; FILE *fp = fopen(filename.c_str(), "wb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); fwrite(&fdata[0], fsize, 1, fp); fclose(fp); return NCV_SUCCESS; }
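The NVBIN reader and writer above (loadFromNVBIN, ncvHaarStoreNVBIN_host, and the .nvbin branch of ncvHaarGetClassifierSize) define the on-disk layout only implicitly, through the order of their fread/memcpy calls. The sketch below restates that layout by peeking at just the cascade counts; it is illustrative only, the helper name peekNvbinCounts is invented here, and error handling is reduced to a bool.

// Illustrative sketch (not part of NCV): read just the cascade counts from an
// .nvbin file, following the field order used by loadFromNVBIN() and
// ncvHaarStoreNVBIN_host() above.
#include <cstdio>

static bool peekNvbinCounts(const char *path,
                            unsigned int &numStages,
                            unsigned int &numRootNodes,
                            unsigned int &numTotalNodes,
                            unsigned int &numFeatures)
{
    FILE *fp = fopen(path, "rb");
    if (!fp) return false;

    // Header: version at offset 0, total file size at offset 4 (size not verified here).
    unsigned int version = 0, fsize = 0;
    bool ok = fread(&version, sizeof(version), 1, fp) == 1 &&
              fread(&fsize,   sizeof(fsize),   1, fp) == 1 &&
              version == 0x1;                   // NVBIN_HAAR_VERSION

    // Cascade counts start right after the 16-byte reserved block
    // (NVBIN_HAAR_SIZERESERVED), in the same order the writer emits them.
    ok = ok && fseek(fp, 16, SEEK_SET) == 0 &&
         fread(&numStages,     sizeof(unsigned int), 1, fp) == 1 &&
         fread(&numRootNodes,  sizeof(unsigned int), 1, fp) == 1 &&
         fread(&numTotalNodes, sizeof(unsigned int), 1, fp) == 1 &&
         fread(&numFeatures,   sizeof(unsigned int), 1, fp) == 1;

    fclose(fp);
    return ok;
}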
3b979c4c4f69996204d7e19e2d388643bac3bdbc.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes CUDA
#include <hip/hip_runtime.h>

extern "C" {
#include "MatUtil.h"
}

__device__ int Min(int a, int b)
{
    return a < b ? a : b;
}

__global__ void NaiveFloydWarshall(int* mat, int k, int N)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N*N)
    {
        int i = idx/N;
        int j = idx - i*N;
        if (mat[i*N + k] != -1 && mat[k*N + j] != -1)
        {
            if (mat[idx] == -1)
            {
                mat[idx] = mat[i*N + k] + mat[k*N +j];
            }
            else
            {
                mat[idx] = Min(mat[i*N + k] + mat[k*N + j], mat[idx]);
            }
        }
    }
}

void NaiveFloydWarshallDriver(int* mat, int N, int thread_per_block)
{
    int* cuda_mat;
    int size = sizeof(int) * N * N;
    hipMalloc((void**) &cuda_mat, size);
    hipMemcpy(cuda_mat, mat, size, hipMemcpyHostToDevice);

    int num_block = ceil(1.0*N*N/(thread_per_block));
    for (int k = 0; k < N; ++k)
    {
        hipLaunchKernelGGL(( NaiveFloydWarshall), dim3(num_block), dim3((thread_per_block)), 0, 0, cuda_mat, k, N);
    }

    hipMemcpy(mat, cuda_mat, size, hipMemcpyDeviceToHost);
    hipFree(cuda_mat);
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    if(argc != 4)
    {
        printf("Usage: test {N} {run_sequential_check: 'T' or 'F'} {thread_per_block}\n");
        exit(-1);
    }
    char run_sequential_check = argv[2][0];
    int thread_per_block = atoi(argv[3]);

    //generate a random matrix.
    size_t N = atoi(argv[1]);
    int *mat = (int*)malloc(sizeof(int)*N*N);
    GenMatrix(mat, N);

    //compute your results
    int *result = (int*)malloc(sizeof(int)*N*N);
    memcpy(result, mat, sizeof(int)*N*N);
    //replace by parallel algorithm
    NaiveFloydWarshallDriver(result, N, thread_per_block);

    //compare your result with reference result
    if (run_sequential_check == 'T')
    {
        int *ref = (int*)malloc(sizeof(int)*N*N);
        memcpy(ref, mat, sizeof(int)*N*N);
        ST_APSP(ref, N);

        if(CmpArray(result, ref, N*N))
            printf("Your result is correct.\n");
        else
            printf("Your result is wrong.\n");
    }
}
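Since each row of this dataset pairs a hipified file with its CUDA original, the note below summarizes the only structural rewrite hipify performs in this pair, the kernel launch; both forms are quoted from the surrounding files, and the comment block itself belongs to neither of them.

// Kernel launch mapping in this pair (reading aid only, not part of either file):
//
//   CUDA (.cu below):
//       NaiveFloydWarshall<<<num_block, (thread_per_block)>>>(cuda_mat, k, N);
//
//   HIP (.hip above, as emitted by hipify):
//       hipLaunchKernelGGL(( NaiveFloydWarshall), dim3(num_block), dim3((thread_per_block)),
//                          0 /*dynamic shared memory*/, 0 /*stream*/, cuda_mat, k, N);
//
// All other changes are one-for-one runtime renames:
//   cuda_runtime.h -> hip/hip_runtime.h, cudaMalloc -> hipMalloc,
//   cudaMemcpy / cudaMemcpyHostToDevice / cudaMemcpyDeviceToHost -> hip* equivalents,
//   cudaFree -> hipFree.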
3b979c4c4f69996204d7e19e2d388643bac3bdbc.cu
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// includes CUDA
#include <cuda_runtime.h>

extern "C" {
#include "MatUtil.h"
}

__device__ int Min(int a, int b)
{
    return a < b ? a : b;
}

__global__ void NaiveFloydWarshall(int* mat, int k, int N)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N*N)
    {
        int i = idx/N;
        int j = idx - i*N;
        if (mat[i*N + k] != -1 && mat[k*N + j] != -1)
        {
            if (mat[idx] == -1)
            {
                mat[idx] = mat[i*N + k] + mat[k*N +j];
            }
            else
            {
                mat[idx] = Min(mat[i*N + k] + mat[k*N + j], mat[idx]);
            }
        }
    }
}

void NaiveFloydWarshallDriver(int* mat, int N, int thread_per_block)
{
    int* cuda_mat;
    int size = sizeof(int) * N * N;
    cudaMalloc((void**) &cuda_mat, size);
    cudaMemcpy(cuda_mat, mat, size, cudaMemcpyHostToDevice);

    int num_block = ceil(1.0*N*N/(thread_per_block));
    for (int k = 0; k < N; ++k)
    {
        NaiveFloydWarshall<<<num_block, (thread_per_block)>>>(cuda_mat, k, N);
    }

    cudaMemcpy(mat, cuda_mat, size, cudaMemcpyDeviceToHost);
    cudaFree(cuda_mat);
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    if(argc != 4)
    {
        printf("Usage: test {N} {run_sequential_check: 'T' or 'F'} {thread_per_block}\n");
        exit(-1);
    }
    char run_sequential_check = argv[2][0];
    int thread_per_block = atoi(argv[3]);

    //generate a random matrix.
    size_t N = atoi(argv[1]);
    int *mat = (int*)malloc(sizeof(int)*N*N);
    GenMatrix(mat, N);

    //compute your results
    int *result = (int*)malloc(sizeof(int)*N*N);
    memcpy(result, mat, sizeof(int)*N*N);
    //replace by parallel algorithm
    NaiveFloydWarshallDriver(result, N, thread_per_block);

    //compare your result with reference result
    if (run_sequential_check == 'T')
    {
        int *ref = (int*)malloc(sizeof(int)*N*N);
        memcpy(ref, mat, sizeof(int)*N*N);
        ST_APSP(ref, N);

        if(CmpArray(result, ref, N*N))
            printf("Your result is correct.\n");
        else
            printf("Your result is wrong.\n");
    }
}
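For comparison with the kernel in the pair above, here is a host-side version of the same relaxation step with the -1 == "no path" convention made explicit. This is a sketch written for this dataset entry, not code from MatUtil.h; it assumes ST_APSP() (declared there but not shown) produces the equivalent all-pairs result that CmpArray checks against.

// Minimal CPU reference for the same relaxation NaiveFloydWarshall performs,
// using -1 to mean "no path", as in the kernel.
static void floydWarshallHost(int *mat, int N)
{
    for (int k = 0; k < N; ++k)
        for (int i = 0; i < N; ++i)
            for (int j = 0; j < N; ++j)
            {
                if (mat[i*N + k] == -1 || mat[k*N + j] == -1)
                    continue;                      // no path through vertex k
                int through_k = mat[i*N + k] + mat[k*N + j];
                if (mat[i*N + j] == -1 || through_k < mat[i*N + j])
                    mat[i*N + j] = through_k;      // relax (i, j)
            }
}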
dc458ff0a8e4106d1c43e28e1d6cfd91623d89d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <string.h> #define CHECK(call)\ {\ const hipError_t error = call;\ if (error != hipSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code:%d, reason: %s\n", error, hipGetErrorString(error));\ exit(1);\ }\ } // FUNCTIONS FOR SETUP int numberCells(float, float *); void setVector(int, float, float *, float *); void setObst(float *); void setGoal(float *, float *, float *); void setInitialValue(float *, float *, float *); void conditionValue(float *, float *, float *, int, int, int); void setInitialPolicy(float *, float *, char *); void conditionPolicy(float *, float *, char *, int, int, int); // FUNCTIONS FOR VALUE ITERATION __global__ void mainOnGPU(float *, float *, float *, char *, float *, char *); __global__ void valueIteration(float *, float *, float *, char *, float *); __device__ void conditionR(int, int, int, float *, float *); __device__ void conditionTheta(int, int, int, float *, float *); __device__ void conditionPhi(int, int, int, float *, float *); __device__ void computeTotalCost(float *, float *); __device__ float computeNewValue(float *); __device__ float computeNewPolicy(float *); // FUNCTIONS FOR ANALYSIS double cpuSecond(void); // DEFINE GLOBAL PARAMETERS IN CPU int nr, ntheta, nphi; float perr; float gamma1; float vGoal, vObst, vMove; float vInitial; int numActions; // DEFINE GLOBAL PARAMETERS IN GPU __constant__ int d_nr, d_ntheta, d_nphi; __constant__ float d_perr; __constant__ float d_gamma1; __constant__ float d_vGoal, d_vObst, d_vMove; __constant__ float d_vInitial; __constant__ int d_numActions; int main(int argc, char **argv) { double iStart = cpuSecond(); numActions=7; // DEFINE PARAMETERS float dr, dtheta, dphi; float rdim[2], thetadim[2], phidim[2]; float *rVec, *thetaVec, *phiVec; // - minimum grid resolution for r, theta, phi dr = atof(argv[1]), dtheta = atof(argv[2]), dphi = atof(argv[3]); // - dimensions of the state space rdim[0] = 0.0, rdim[1] = 10.0; thetadim[0] = 0.0, thetadim[1] = 360.0; phidim[0] = 0.0, phidim[1] = 360.0; // - number of grid cells nr = numberCells(dr, rdim); ntheta = numberCells(dtheta, thetadim); nphi = numberCells(dphi, phidim); printf("%d,", nr*ntheta*nphi); // - vectors for r, theta, phi rVec = (float *)malloc(sizeof(float)*nr); thetaVec = (float *)malloc(sizeof(float)*ntheta); phiVec = (float *)malloc(sizeof(float)*nphi); setVector(nr, dr, rdim, rVec); setVector(ntheta, dtheta, thetadim, thetaVec); setVector(nphi, dphi, phidim, phiVec); // - probability of going the wrong way perr = 0.0; // attenuation rate gamma1 = 1.0; // - value of goal, collision, movement vGoal = 100.0; vObst = -100.0; vMove = -1.0; // initial guess at all values vInitial = 0.0; // DEFINE OBSTACLE AND GOAL LOCATIONS float *isobst, *isgoal; isobst = (float *)calloc(nr*ntheta*nphi, sizeof(float)); isgoal = (float *)calloc(nr*ntheta*nphi, sizeof(float)); setObst(isobst); setGoal(thetaVec, phiVec, isgoal); // DEFINE OBSTACLE AND GOAL LOCATIONS IN GPU float *d_isobst, *d_isgoal; CHECK(hipMalloc((float**)&d_isobst, nr*ntheta*nphi*sizeof(float))); CHECK(hipMalloc((float**)&d_isgoal, nr*ntheta*nphi*sizeof(float))); CHECK(hipMemcpy(d_isobst, isobst, nr*ntheta*nphi*sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_isgoal, isgoal, nr*ntheta*nphi*sizeof(float), hipMemcpyHostToDevice)); // DEFINE INITIAL GUESS AT VALUE AND POLICY float *J; char *U; J = (float 
*)calloc(nr*ntheta*nphi, sizeof(float)); U = (char *)calloc(nr*ntheta*nphi, sizeof(char)); setInitialValue(isobst, isgoal, J); setInitialPolicy(isobst, isgoal, U); // DO VALUE ITERATION float *Jprev; char *Uprev; Jprev = (float *)calloc(nr*ntheta*nphi, sizeof(float)); Uprev = (char *)calloc(nr*ntheta*nphi, sizeof(char)); // TRANSFER VARIABLE DATA FROM HOST TO DEVICE CHECK(hipMemcpyToSymbol(d_nr, &nr, sizeof(int), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_ntheta, &ntheta, sizeof(int), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_nphi, &nphi, sizeof(int), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_perr, &perr, sizeof(float), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_gamma1, &gamma1, sizeof(float), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_vGoal, &vGoal, sizeof(float), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_vObst, &vObst, sizeof(float), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_vMove, &vMove, sizeof(float), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_vInitial, &vInitial, sizeof(float), 0, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(d_numActions, &numActions, sizeof(int), 0, hipMemcpyHostToDevice)); // allocate memory at device float *d_J, *d_Jprev; char *d_U, *d_Uprev; CHECK(hipMalloc((float**)&d_J, nr*ntheta*nphi*sizeof(float))); CHECK(hipMalloc((char**)&d_U, nr*ntheta*nphi*sizeof(char))); CHECK(hipMalloc((float**)&d_Jprev, nr*ntheta*nphi*sizeof(float))); CHECK(hipMalloc((char**)&d_Uprev, nr*ntheta*nphi*sizeof(char))); // transfer data from host to device CHECK(hipMemcpy(d_J, J, nr*ntheta*nphi*sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_U, U, nr*ntheta*nphi*sizeof(char), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_Jprev, Jprev, nr*ntheta*nphi*sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_Uprev, Uprev, nr*ntheta*nphi*sizeof(char), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( mainOnGPU), dim3(1),dim3(1), 0, 0, d_isobst, d_isgoal, d_J, d_U, d_Jprev, d_Uprev); // copy result from device to host CHECK(hipMemcpy(J, d_J, nr*ntheta*nphi*sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(U, d_U, nr*ntheta*nphi*sizeof(char), hipMemcpyDeviceToHost)); CHECK(hipFree(d_J)); CHECK(hipFree(d_U)); CHECK(hipFree(d_Jprev)); CHECK(hipFree(d_Uprev)); // FREE USED MEMORY IN CPU free(rVec); free(thetaVec); free(phiVec); free(isobst); free(isgoal); free(J); free(U); free(Jprev); free(Uprev); // FREE USED MEMORY IN GPU CHECK(hipFree(d_isobst)); CHECK(hipFree(d_isgoal)); double iElaps = cpuSecond()-iStart; printf("%f\n", iElaps*1000.0f); return(0); } /*--------------- FUNCTIONS FOR SETUP ----------------*/ int numberCells(float d, float *dim) { int n = 0; float diff; diff = dim[1]-dim[0]; if(d<0 || d>diff){ printf("value of resolution or dimension is invalid.\n"); } else{ n = floorf(diff/d+1.0); } return n; } void setVector(int n, float d, float *dim, float *Vec) { float value; value = dim[0]; for(int i=0; i<n; i++){ Vec[i] = value; value += d; } } void setObst(float *isobst) { for(int j=0; j<ntheta; j++){ for(int k=0;k<nphi; k++){ isobst[nr*ntheta*k+(nr-1)*ntheta+j] = 1; } } } void setGoal(float *thetaVec, float *phiVec, float *isgoal) { for(int j=0; j<ntheta; j++){ for(int k=0; k<nphi; k++){ if(thetaVec[j]==phiVec[k]) isgoal[nr*ntheta*k+j] = 1; } } } void setInitialValue(float *isobst, float *isgoal, float *J) { for(int i=0; i<nr; i++){ for(int j=0; j<ntheta; j++){ for(int k=0; k<nphi; k++){ conditionValue(isobst, isgoal, J, i, j, k); } } } } void conditionValue(float *isobst, float 
*isgoal, float *J, int i, int j, int k) { if(isobst[nr*ntheta*k+ntheta*i+j]){ J[nr*ntheta*k+ntheta*i+j] = vObst; } else if(isgoal[nr*ntheta*k+ntheta*i+j]){ J[nr*ntheta*k+ntheta*i+j] = vGoal; } else{ J[nr*ntheta*k+ntheta*i+j] = vInitial; } } void setInitialPolicy(float *isobst, float *isgoal, char *U) { srand((unsigned)time(NULL)); for(int i=0; i<nr; i++){ for(int j=0; j<ntheta; j++){ for(int k=0; k<nphi; k++){ conditionPolicy(isobst, isgoal, U, i, j, k); } } } } void conditionPolicy(float *isobst, float *isgoal, char *U, int i, int j, int k) { if(isobst[nr*ntheta*k+ntheta*i+j]){ U[nr*ntheta*k+ntheta*i+j] = -1; } else if(isgoal[nr*ntheta*k+ntheta*i+j]){ U[nr*ntheta*k+ntheta*i+j] = -1; } else{ char r = rand() % numActions; U[nr*ntheta*k+ntheta*i+j] = r; } } /*--------------- FUNCTIONS FOR VALUE ITERATION ----------------*/ __global__ void mainOnGPU(float *d_isobst, float *d_isgoal, float *d_J, char *d_U, float *d_Jprev, char *d_Uprev) { dim3 nThreads(2,4,4); dim3 nBlocks((d_nr+nThreads.x-1)/nThreads.x,(d_ntheta+nThreads.y-1)/nThreads.y,(d_nphi+nThreads.z-1)/nThreads.z); float error=1; int t=1; while(error!=0){ //printf("Iteration %d\n", t); // Iterate over all states. memcpy(d_Jprev, d_J, sizeof(float)*d_nr*d_ntheta*d_nphi); memcpy(d_Uprev, d_U, sizeof(char)*d_nr*d_ntheta*d_nphi); // call kernel hipLaunchKernelGGL(( valueIteration), dim3(nBlocks), dim3(nThreads), 0, 0, d_isobst, d_isgoal, d_J, d_U, d_Jprev); // CHECK(hipDeviceSynchronize()); error=0; for(int x=0; x<d_nr*d_ntheta*d_nphi; x++){ //printf("%2d d_J=%3.1f d_Jprev= %3.1f d_U=%d\n", x, d_J[x], d_Jprev[x], d_U[x]); error+=(d_J[x]-d_Jprev[x]); } t+=1; //printf("\n"); } } __global__ void valueIteration(float *isobst, float *isgoal, float *J, char *U, float *Jprev) { int i=blockIdx.x*blockDim.x+threadIdx.x; int j=blockIdx.y*blockDim.y+threadIdx.y; int k=blockIdx.z*blockDim.z+threadIdx.z; //printf("i=%d j=%d k=%d\n", blockIdx.x,j,k); float *tempCost, *totalCost; tempCost = (float*)malloc(d_numActions*sizeof(float)); totalCost = (float*)malloc(d_numActions*sizeof(float)); if(i<d_nr && j<d_ntheta && k<d_nphi){ if(!isobst[d_nr*d_ntheta*k+d_ntheta*i+j] && !isgoal[d_nr*d_ntheta*k+d_ntheta*i+j]){ tempCost[0]=Jprev[d_nr*d_ntheta*k+d_ntheta*i+j]; // condition of r conditionR(i, j, k, tempCost, Jprev); // Compute the total expected cost for each of the possible actions. computeTotalCost(tempCost, totalCost); // Compute the new exptected cost-to-go, by taking the maximum over // possible actions. 
J[d_nr*d_ntheta*k+d_ntheta*i+j] = computeNewValue(totalCost); U[d_nr*d_ntheta*k+d_ntheta*i+j] = computeNewPolicy(totalCost); } } free(tempCost); free(totalCost); __syncthreads(); } __device__ void conditionR(int i, int j, int k, float *tempCost, float *Jprev) { if(i==0){ tempCost[1] = Jprev[d_nr*d_ntheta*k+d_ntheta*(i+1)+j]; tempCost[2] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+j]; } else{ tempCost[1] = Jprev[d_nr*d_ntheta*k+d_ntheta*(i+1)+j]; tempCost[2] = Jprev[d_nr*d_ntheta*k+d_ntheta*(i-1)+j]; } conditionTheta(i, j, k, tempCost, Jprev); } __device__ void conditionTheta(int i, int j, int k, float *tempCost, float *Jprev) { if(j==0){ tempCost[3] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j+1)]; tempCost[4] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(d_ntheta-1)]; } else if(j==d_ntheta-1){ tempCost[3] = Jprev[d_nr*d_ntheta*k+d_ntheta*i]; tempCost[4] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j-1)]; } else{ tempCost[3] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j+1)]; tempCost[4] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j-1)]; } conditionPhi(i, j, k, tempCost, Jprev); } __device__ void conditionPhi(int i, int j, int k, float *tempCost, float *Jprev) { if(k==0){ tempCost[5] = Jprev[d_nr*d_ntheta*(k+1)+d_ntheta*i+j]; tempCost[6] = Jprev[d_nr*d_ntheta*(d_nphi-1)+d_ntheta*i+j]; } else if(k==d_nphi-1){ tempCost[5] = Jprev[d_ntheta*i+j]; tempCost[6] = Jprev[d_nr*d_ntheta*(k-1)+d_ntheta*i+j]; } else{ tempCost[5] = Jprev[d_nr*d_ntheta*(k+1)+d_ntheta*i+j]; tempCost[6] = Jprev[d_nr*d_ntheta*(k-1)+d_ntheta*i+j]; } } __device__ void computeTotalCost(float *tempCost, float *totalCost) { float tempCostTotal=0; for(int n=0; n<d_numActions; n++){ tempCostTotal+=tempCost[n]; } for(int n=0; n<d_numActions; n++){ totalCost[n]=d_vMove+d_gamma1*((1-d_perr)*tempCost[n]+(d_perr/6)*(tempCostTotal-tempCost[n])); } } __device__ float computeNewValue(float *totalCost) { float max; max = totalCost[0]; for(int n=0; n<d_numActions; n++){ if(totalCost[n]>max) max=totalCost[n]; } return max; } __device__ float computeNewPolicy(float *totalCost) { float max; float idx; max = totalCost[0]; for(int n=0; n<d_numActions; n++){ if(totalCost[n]>max){ max=totalCost[n]; idx=n; } } return idx; } /*-------------- FUNCTION FOR ANALYSIS --------------*/ double cpuSecond(void) { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); }
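Both versions of this value-iteration file linearize the (r, theta, phi) grid by hand as nr*ntheta*k + ntheta*i + j in every loop and kernel. The helpers below restate that indexing once for readability; stateIndex and stateCoords are illustrative names that do not exist in either file.

// Illustration only: the flat-index convention used throughout the file above
// and its CUDA counterpart below. phi (k) varies slowest, theta (j) fastest.
__host__ __device__ static inline int stateIndex(int i, int j, int k,
                                                 int nr_, int ntheta_)
{
    return nr_ * ntheta_ * k + ntheta_ * i + j;
}

// Inverse mapping for iterating over a flat array of nr*ntheta*nphi states.
__host__ __device__ static inline void stateCoords(int idx, int nr_, int ntheta_,
                                                   int &i, int &j, int &k)
{
    k = idx / (nr_ * ntheta_);
    int rem = idx - k * nr_ * ntheta_;
    i = rem / ntheta_;
    j = rem - i * ntheta_;
}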
dc458ff0a8e4106d1c43e28e1d6cfd91623d89d9.cu
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <string.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if (error != cudaSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\ exit(1);\ }\ } // FUNCTIONS FOR SETUP int numberCells(float, float *); void setVector(int, float, float *, float *); void setObst(float *); void setGoal(float *, float *, float *); void setInitialValue(float *, float *, float *); void conditionValue(float *, float *, float *, int, int, int); void setInitialPolicy(float *, float *, char *); void conditionPolicy(float *, float *, char *, int, int, int); // FUNCTIONS FOR VALUE ITERATION __global__ void mainOnGPU(float *, float *, float *, char *, float *, char *); __global__ void valueIteration(float *, float *, float *, char *, float *); __device__ void conditionR(int, int, int, float *, float *); __device__ void conditionTheta(int, int, int, float *, float *); __device__ void conditionPhi(int, int, int, float *, float *); __device__ void computeTotalCost(float *, float *); __device__ float computeNewValue(float *); __device__ float computeNewPolicy(float *); // FUNCTIONS FOR ANALYSIS double cpuSecond(void); // DEFINE GLOBAL PARAMETERS IN CPU int nr, ntheta, nphi; float perr; float gamma1; float vGoal, vObst, vMove; float vInitial; int numActions; // DEFINE GLOBAL PARAMETERS IN GPU __constant__ int d_nr, d_ntheta, d_nphi; __constant__ float d_perr; __constant__ float d_gamma1; __constant__ float d_vGoal, d_vObst, d_vMove; __constant__ float d_vInitial; __constant__ int d_numActions; int main(int argc, char **argv) { double iStart = cpuSecond(); numActions=7; // DEFINE PARAMETERS float dr, dtheta, dphi; float rdim[2], thetadim[2], phidim[2]; float *rVec, *thetaVec, *phiVec; // - minimum grid resolution for r, theta, phi dr = atof(argv[1]), dtheta = atof(argv[2]), dphi = atof(argv[3]); // - dimensions of the state space rdim[0] = 0.0, rdim[1] = 10.0; thetadim[0] = 0.0, thetadim[1] = 360.0; phidim[0] = 0.0, phidim[1] = 360.0; // - number of grid cells nr = numberCells(dr, rdim); ntheta = numberCells(dtheta, thetadim); nphi = numberCells(dphi, phidim); printf("%d,", nr*ntheta*nphi); // - vectors for r, theta, phi rVec = (float *)malloc(sizeof(float)*nr); thetaVec = (float *)malloc(sizeof(float)*ntheta); phiVec = (float *)malloc(sizeof(float)*nphi); setVector(nr, dr, rdim, rVec); setVector(ntheta, dtheta, thetadim, thetaVec); setVector(nphi, dphi, phidim, phiVec); // - probability of going the wrong way perr = 0.0; // attenuation rate gamma1 = 1.0; // - value of goal, collision, movement vGoal = 100.0; vObst = -100.0; vMove = -1.0; // initial guess at all values vInitial = 0.0; // DEFINE OBSTACLE AND GOAL LOCATIONS float *isobst, *isgoal; isobst = (float *)calloc(nr*ntheta*nphi, sizeof(float)); isgoal = (float *)calloc(nr*ntheta*nphi, sizeof(float)); setObst(isobst); setGoal(thetaVec, phiVec, isgoal); // DEFINE OBSTACLE AND GOAL LOCATIONS IN GPU float *d_isobst, *d_isgoal; CHECK(cudaMalloc((float**)&d_isobst, nr*ntheta*nphi*sizeof(float))); CHECK(cudaMalloc((float**)&d_isgoal, nr*ntheta*nphi*sizeof(float))); CHECK(cudaMemcpy(d_isobst, isobst, nr*ntheta*nphi*sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_isgoal, isgoal, nr*ntheta*nphi*sizeof(float), cudaMemcpyHostToDevice)); // DEFINE INITIAL GUESS AT VALUE AND POLICY float *J; char *U; J = (float *)calloc(nr*ntheta*nphi, sizeof(float)); U = (char *)calloc(nr*ntheta*nphi, sizeof(char)); 
    setInitialValue(isobst, isgoal, J);
    setInitialPolicy(isobst, isgoal, U);

    // DO VALUE ITERATION
    float *Jprev;
    char *Uprev;
    Jprev = (float *)calloc(nr*ntheta*nphi, sizeof(float));
    Uprev = (char *)calloc(nr*ntheta*nphi, sizeof(char));

    // TRANSFER VARIABLE DATA FROM HOST TO DEVICE
    CHECK(cudaMemcpyToSymbol(d_nr, &nr, sizeof(int), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_ntheta, &ntheta, sizeof(int), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_nphi, &nphi, sizeof(int), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_perr, &perr, sizeof(float), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_gamma1, &gamma1, sizeof(float), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_vGoal, &vGoal, sizeof(float), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_vObst, &vObst, sizeof(float), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_vMove, &vMove, sizeof(float), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_vInitial, &vInitial, sizeof(float), 0, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpyToSymbol(d_numActions, &numActions, sizeof(int), 0, cudaMemcpyHostToDevice));

    // allocate memory at device
    float *d_J, *d_Jprev;
    char *d_U, *d_Uprev;
    CHECK(cudaMalloc((float**)&d_J, nr*ntheta*nphi*sizeof(float)));
    CHECK(cudaMalloc((char**)&d_U, nr*ntheta*nphi*sizeof(char)));
    CHECK(cudaMalloc((float**)&d_Jprev, nr*ntheta*nphi*sizeof(float)));
    CHECK(cudaMalloc((char**)&d_Uprev, nr*ntheta*nphi*sizeof(char)));

    // transfer data from host to device
    CHECK(cudaMemcpy(d_J, J, nr*ntheta*nphi*sizeof(float), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_U, U, nr*ntheta*nphi*sizeof(char), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_Jprev, Jprev, nr*ntheta*nphi*sizeof(float), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_Uprev, Uprev, nr*ntheta*nphi*sizeof(char), cudaMemcpyHostToDevice));

    mainOnGPU<<<1,1>>>(d_isobst, d_isgoal, d_J, d_U, d_Jprev, d_Uprev);

    // copy result from device to host
    CHECK(cudaMemcpy(J, d_J, nr*ntheta*nphi*sizeof(float), cudaMemcpyDeviceToHost));
    CHECK(cudaMemcpy(U, d_U, nr*ntheta*nphi*sizeof(char), cudaMemcpyDeviceToHost));

    CHECK(cudaFree(d_J));
    CHECK(cudaFree(d_U));
    CHECK(cudaFree(d_Jprev));
    CHECK(cudaFree(d_Uprev));

    // FREE USED MEMORY IN CPU
    free(rVec);
    free(thetaVec);
    free(phiVec);
    free(isobst);
    free(isgoal);
    free(J);
    free(U);
    free(Jprev);
    free(Uprev);

    // FREE USED MEMORY IN GPU
    CHECK(cudaFree(d_isobst));
    CHECK(cudaFree(d_isgoal));

    double iElaps = cpuSecond()-iStart;
    printf("%f\n", iElaps*1000.0f);

    return(0);
}

/*--------------- FUNCTIONS FOR SETUP ----------------*/
int numberCells(float d, float *dim)
{
    int n = 0;
    float diff;

    diff = dim[1]-dim[0];
    if(d<0 || d>diff){
        printf("value of resolution or dimension is invalid.\n");
    }
    else{
        n = floorf(diff/d+1.0);
    }

    return n;
}

void setVector(int n, float d, float *dim, float *Vec)
{
    float value;

    value = dim[0];
    for(int i=0; i<n; i++){
        Vec[i] = value;
        value += d;
    }
}

void setObst(float *isobst)
{
    for(int j=0; j<ntheta; j++){
        for(int k=0; k<nphi; k++){
            isobst[nr*ntheta*k+(nr-1)*ntheta+j] = 1;
        }
    }
}

void setGoal(float *thetaVec, float *phiVec, float *isgoal)
{
    for(int j=0; j<ntheta; j++){
        for(int k=0; k<nphi; k++){
            if(thetaVec[j]==phiVec[k]) isgoal[nr*ntheta*k+j] = 1;
        }
    }
}

void setInitialValue(float *isobst, float *isgoal, float *J)
{
    for(int i=0; i<nr; i++){
        for(int j=0; j<ntheta; j++){
            for(int k=0; k<nphi; k++){
                conditionValue(isobst, isgoal, J, i, j, k);
            }
        }
    }
}

void conditionValue(float *isobst, float *isgoal, float *J, int i, int j, int k)
{
    if(isobst[nr*ntheta*k+ntheta*i+j]){
        J[nr*ntheta*k+ntheta*i+j] = vObst;
    }
    else if(isgoal[nr*ntheta*k+ntheta*i+j]){
        J[nr*ntheta*k+ntheta*i+j] = vGoal;
    }
    else{
        J[nr*ntheta*k+ntheta*i+j] = vInitial;
    }
}

void setInitialPolicy(float *isobst, float *isgoal, char *U)
{
    srand((unsigned)time(NULL));

    for(int i=0; i<nr; i++){
        for(int j=0; j<ntheta; j++){
            for(int k=0; k<nphi; k++){
                conditionPolicy(isobst, isgoal, U, i, j, k);
            }
        }
    }
}

void conditionPolicy(float *isobst, float *isgoal, char *U, int i, int j, int k)
{
    if(isobst[nr*ntheta*k+ntheta*i+j]){
        U[nr*ntheta*k+ntheta*i+j] = -1;
    }
    else if(isgoal[nr*ntheta*k+ntheta*i+j]){
        U[nr*ntheta*k+ntheta*i+j] = -1;
    }
    else{
        char r = rand() % numActions;
        U[nr*ntheta*k+ntheta*i+j] = r;
    }
}

/*--------------- FUNCTIONS FOR VALUE ITERATION ----------------*/
__global__ void mainOnGPU(float *d_isobst, float *d_isgoal, float *d_J, char *d_U, float *d_Jprev, char *d_Uprev)
{
    dim3 nThreads(2,4,4);
    dim3 nBlocks((d_nr+nThreads.x-1)/nThreads.x, (d_ntheta+nThreads.y-1)/nThreads.y, (d_nphi+nThreads.z-1)/nThreads.z);

    float error=1;
    int t=1;
    while(error!=0){
        //printf("Iteration %d\n", t);

        // Iterate over all states.
        memcpy(d_Jprev, d_J, sizeof(float)*d_nr*d_ntheta*d_nphi);
        memcpy(d_Uprev, d_U, sizeof(char)*d_nr*d_ntheta*d_nphi);

        // call kernel (device-side launch, i.e. dynamic parallelism)
        valueIteration<<<nBlocks, nThreads>>>(d_isobst, d_isgoal, d_J, d_U, d_Jprev);
        // Wait for the child grid to finish before reading d_J below; without this
        // synchronization the comparison against d_Jprev races with the child kernel.
        // (Device-side cudaDeviceSynchronize() is deprecated in newer CUDA toolkits.)
        cudaDeviceSynchronize();

        error=0;
        for(int x=0; x<d_nr*d_ntheta*d_nphi; x++){
            //printf("%2d d_J=%3.1f d_Jprev= %3.1f d_U=%d\n", x, d_J[x], d_Jprev[x], d_U[x]);
            // accumulate the absolute change so positive and negative updates cannot cancel
            error += fabsf(d_J[x]-d_Jprev[x]);
        }
        t+=1;
        //printf("\n");
    }
}

__global__ void valueIteration(float *isobst, float *isgoal, float *J, char *U, float *Jprev)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    int j=blockIdx.y*blockDim.y+threadIdx.y;
    int k=blockIdx.z*blockDim.z+threadIdx.z;
    //printf("i=%d j=%d k=%d\n", blockIdx.x,j,k);

    float *tempCost, *totalCost;
    tempCost = (float*)malloc(d_numActions*sizeof(float));
    totalCost = (float*)malloc(d_numActions*sizeof(float));

    if(i<d_nr && j<d_ntheta && k<d_nphi){
        if(!isobst[d_nr*d_ntheta*k+d_ntheta*i+j] && !isgoal[d_nr*d_ntheta*k+d_ntheta*i+j]){
            tempCost[0]=Jprev[d_nr*d_ntheta*k+d_ntheta*i+j];

            // condition of r
            conditionR(i, j, k, tempCost, Jprev);

            // Compute the total expected cost for each of the possible actions.
            computeTotalCost(tempCost, totalCost);

            // Compute the new expected cost-to-go, by taking the maximum over
            // possible actions.
            J[d_nr*d_ntheta*k+d_ntheta*i+j] = computeNewValue(totalCost);
            U[d_nr*d_ntheta*k+d_ntheta*i+j] = computeNewPolicy(totalCost);
        }
    }

    free(tempCost);
    free(totalCost);

    __syncthreads();
}

__device__ void conditionR(int i, int j, int k, float *tempCost, float *Jprev)
{
    // Note: i==d_nr-1 never reaches this function, because the outermost r shell
    // is marked as an obstacle in setObst(), so the i+1 access stays in bounds.
    if(i==0){
        tempCost[1] = Jprev[d_nr*d_ntheta*k+d_ntheta*(i+1)+j];
        tempCost[2] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+j];
    }
    else{
        tempCost[1] = Jprev[d_nr*d_ntheta*k+d_ntheta*(i+1)+j];
        tempCost[2] = Jprev[d_nr*d_ntheta*k+d_ntheta*(i-1)+j];
    }

    conditionTheta(i, j, k, tempCost, Jprev);
}

__device__ void conditionTheta(int i, int j, int k, float *tempCost, float *Jprev)
{
    // theta wraps around: j==d_ntheta-1 connects back to j==0 and vice versa
    if(j==0){
        tempCost[3] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j+1)];
        tempCost[4] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(d_ntheta-1)];
    }
    else if(j==d_ntheta-1){
        tempCost[3] = Jprev[d_nr*d_ntheta*k+d_ntheta*i];
        tempCost[4] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j-1)];
    }
    else{
        tempCost[3] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j+1)];
        tempCost[4] = Jprev[d_nr*d_ntheta*k+d_ntheta*i+(j-1)];
    }

    conditionPhi(i, j, k, tempCost, Jprev);
}

__device__ void conditionPhi(int i, int j, int k, float *tempCost, float *Jprev)
{
    // phi wraps around: k==d_nphi-1 connects back to k==0 and vice versa
    if(k==0){
        tempCost[5] = Jprev[d_nr*d_ntheta*(k+1)+d_ntheta*i+j];
        tempCost[6] = Jprev[d_nr*d_ntheta*(d_nphi-1)+d_ntheta*i+j];
    }
    else if(k==d_nphi-1){
        tempCost[5] = Jprev[d_ntheta*i+j];
        tempCost[6] = Jprev[d_nr*d_ntheta*(k-1)+d_ntheta*i+j];
    }
    else{
        tempCost[5] = Jprev[d_nr*d_ntheta*(k+1)+d_ntheta*i+j];
        tempCost[6] = Jprev[d_nr*d_ntheta*(k-1)+d_ntheta*i+j];
    }
}

__device__ void computeTotalCost(float *tempCost, float *totalCost)
{
    float tempCostTotal=0;
    for(int n=0; n<d_numActions; n++){
        tempCostTotal+=tempCost[n];
    }
    for(int n=0; n<d_numActions; n++){
        // with probability 1-perr the intended neighbor is reached; otherwise the
        // error probability is split evenly over the other six actions
        totalCost[n]=d_vMove+d_gamma1*((1-d_perr)*tempCost[n]+(d_perr/6)*(tempCostTotal-tempCost[n]));
    }
}

__device__ float computeNewValue(float *totalCost)
{
    float max;

    max = totalCost[0];
    for(int n=0; n<d_numActions; n++){
        if(totalCost[n]>max) max=totalCost[n];
    }

    return max;
}

__device__ float computeNewPolicy(float *totalCost)
{
    float max;
    float idx = 0;   // initialize so that action 0 is returned when it is already the best

    max = totalCost[0];
    for(int n=0; n<d_numActions; n++){
        if(totalCost[n]>max){
            max=totalCost[n];
            idx=n;
        }
    }

    return idx;
}

/*-------------- FUNCTION FOR ANALYSIS --------------*/
double cpuSecond(void)
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
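// ---------------------------------------------------------------------------
// Build/run sketch. Because mainOnGPU launches valueIteration from device code
// (dynamic parallelism) and calls device-side cudaDeviceSynchronize(), the file
// must be compiled with relocatable device code for a GPU of compute capability
// 3.5 or newer and linked against the CUDA device runtime. The output binary
// name and the example resolutions below are illustrative assumptions only:
//
//   nvcc -arch=sm_35 -rdc=true dc458ff0a8e4106d1c43e28e1d6cfd91623d89d9.cu \
//        -o value_iteration -lcudadevrt
//
//   ./value_iteration 0.5 10.0 10.0     # dr dtheta dphi
//
// For fine grids, the per-thread malloc() inside valueIteration may also need a
// larger device heap, set on the host before the first kernel launch, e.g.
// cudaDeviceSetLimit(cudaLimitMallocHeapSize, <bytes>).
// ---------------------------------------------------------------------------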