Dataset columns (all string-typed; value lengths shown as min – max):

  hip_filename    5 – 84
  hip_content     79 – 9.69M
  cuda_filename   4 – 83
  cuda_content    19 – 9.69M
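Each row below pairs a hipified file (hip_filename, hip_content) with the CUDA source it was generated from (cuda_filename, cuda_content). As a quick orientation, here is a minimal Python sketch of how such a paired dataset could be loaded and inspected with the Hugging Face datasets library; the identifier "user/hip-cuda-pairs" is a hypothetical placeholder, not this dataset's actual name.

# Minimal sketch: iterate over HIP/CUDA file pairs.
# "user/hip-cuda-pairs" is a placeholder identifier, not this dataset's real name.
from datasets import load_dataset

ds = load_dataset("user/hip-cuda-pairs", split="train")

for row in ds.select(range(3)):
    # Each row holds one hipified file and the CUDA file it was generated from.
    print(row["hip_filename"], "<-", row["cuda_filename"])
    print(row["hip_content"][:200])  # first 200 characters of the HIP source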
5a0890f6cbde273a59488b1f6a2e1e7bb2e869e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> __global__ void kernel(int* array) { int index = blockIdx.x * blockDim.x + threadIdx.x; array[index] = index; } int main() { int num_element = 1024; int* host_array = (int*)malloc( num_element * sizeof(int) ); int* device_array; hipMalloc( (void**)&device_array , num_element * sizeof(int) ); int block_size = 128; int grid_size = num_element / block_size; hipLaunchKernelGGL(( kernel), dim3(grid_size),dim3(block_size), 0, 0, device_array); hipMemcpy(host_array,device_array,num_element * sizeof(int),hipMemcpyDeviceToHost); for(int i = 0 ; i < num_element ; i++) { printf("%d ",host_array[i]); } printf("\n"); free(host_array); hipFree(device_array); return 0; }
5a0890f6cbde273a59488b1f6a2e1e7bb2e869e4.cu
#include <stdio.h>

__global__ void kernel(int* array) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  array[index] = index;
}

int main() {
  int num_element = 1024;
  int* host_array = (int*)malloc(num_element * sizeof(int));
  int* device_array;
  cudaMalloc((void**)&device_array, num_element * sizeof(int));

  int block_size = 128;
  int grid_size = num_element / block_size;
  kernel<<<grid_size, block_size>>>(device_array);

  cudaMemcpy(host_array, device_array, num_element * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < num_element; i++) {
    printf("%d ", host_array[i]);
  }
  printf("\n");

  free(host_array);
  cudaFree(device_array);
  return 0;
}
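The pair above shows the typical substitutions hipify applies to the CUDA runtime API: cudaMalloc → hipMalloc, cudaMemcpy and cudaMemcpyDeviceToHost → hipMemcpy and hipMemcpyDeviceToHost, cudaFree → hipFree, and the kernel<<<grid,block>>>(...) launch rewritten as hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), 0, 0, ...). The Python sketch below is a toy, string-level illustration of only those renames; the real hipify tools (hipify-clang, hipify-perl) cover far more of the API and do not rely on naive text substitution alone.

# Toy illustration of the API renames visible in the pair above.
# Not the real hipify implementation; the kernel-launch rewrite needs real parsing.
CUDA_TO_HIP = {
    "cudaMalloc": "hipMalloc",
    "cudaMemcpy": "hipMemcpy",
    "cudaMemcpyDeviceToHost": "hipMemcpyDeviceToHost",
    "cudaFree": "hipFree",
}

def rename_runtime_calls(cuda_source: str) -> str:
    # Replace longer names first so cudaMemcpyDeviceToHost is handled as one token.
    for cuda_name in sorted(CUDA_TO_HIP, key=len, reverse=True):
        cuda_source = cuda_source.replace(cuda_name, CUDA_TO_HIP[cuda_name])
    return cuda_source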
4b2abc2110bb750a4fb7ba045235f28289e0f4db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2022 by Contributors * \file array/cuda/rowwise_sampling_prob.cu * \brief weighted rowwise sampling. The degree computing kernels and * host-side functions are partially borrowed from the uniform rowwise * sampling code rowwise_sampling.cu. * \author pengqirong (OPPO), dlasalle and Xin from Nvidia. */ #include <dgl/random.h> #include <dgl/runtime/device_api.h> #include <hiprand/hiprand_kernel.h> #include <numeric> #include "./dgl_cub.cuh" #include "../../array/cuda/atomic.cuh" #include "../../runtime/cuda/cuda_common.h" // require CUB 1.17 to use DeviceSegmentedSort static_assert(CUB_VERSION >= 101700, "Require CUB >= 1.17 to use DeviceSegmentedSort"); using namespace dgl::aten::cuda; namespace dgl { namespace aten { namespace impl { namespace { constexpr int BLOCK_SIZE = 128; /** * @brief Compute the size of each row in the sampled CSR, without replacement. * temp_deg is calculated for rows with deg > num_picks. * For these rows, we will calculate their A-Res values and sort them to get top-num_picks. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by `in_rows` (output). * @param temp_deg The size of each row in the input matrix, as indexed by `in_rows` (output). */ template<typename IdType> __global__ void _CSRRowWiseSampleDegreeKernel( const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, IdType * const out_deg, IdType * const temp_deg) { const int64_t tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int64_t in_row = in_rows[tIdx]; const int64_t out_row = tIdx; const IdType deg = in_ptr[in_row + 1] - in_ptr[in_row]; // temp_deg is used to generate ares_ptr temp_deg[out_row] = deg > static_cast<IdType>(num_picks) ? deg : 0; out_deg[out_row] = min(static_cast<IdType>(num_picks), deg); if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; temp_deg[num_rows] = 0; } } } /** * @brief Compute the size of each row in the sampled CSR, with replacement. * We need the actual in degree of each row to store CDF values. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by `in_rows` (output). * @param temp_deg The size of each row in the input matrix, as indexed by `in_rows` (output). */ template<typename IdType> __global__ void _CSRRowWiseSampleDegreeReplaceKernel( const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, IdType * const out_deg, IdType * const temp_deg) { const int64_t tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int64_t in_row = in_rows[tIdx]; const int64_t out_row = tIdx; const IdType deg = in_ptr[in_row + 1] - in_ptr[in_row]; temp_deg[out_row] = deg; out_deg[out_row] = deg == 0 ? 
0 : static_cast<IdType>(num_picks); if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; temp_deg[num_rows] = 0; } } } /** * @brief Equivalent to numpy expression: array[idx[off:off + len]] * * @tparam IdType The ID type used for indices. * @tparam FloatType The float type used for array values. * @param array The array to be selected. * @param idx_data The index mapping array. * @param index The index of value to be selected. * @param offset The offset to start. * @param out The selected value (output). */ template<typename IdType, typename FloatType> __device__ void _DoubleSlice( const FloatType * const array, const IdType * const idx_data, const IdType idx, const IdType offset, FloatType* const out) { if (idx_data) { *out = array[idx_data[offset + idx]]; } else { *out = array[offset + idx]; } } /** * @brief Compute A-Res value. A-Res value needs to be calculated only if deg * is greater than num_picks in weighted rowwise sampling without replacement. * * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param data The data array of the input CSR. * @param prob The probability array of the input CSR. * @param ares_ptr The offset to write each row to in the A-res array. * @param ares_idxs The A-Res value corresponding index array, the index of input CSR (output). * @param ares The A-Res value array (output). * @author pengqirong (OPPO) */ template<typename IdType, typename FloatType, int TILE_SIZE> __global__ void _CSRAResValueKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, const IdType * const data, const FloatType * const prob, const IdType * const ares_ptr, IdType * const ares_idxs, FloatType * const ares) { int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); hiprandStatePhilox4_32_10_t rng; hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t deg = in_ptr[row + 1] - in_row_start; // A-Res value needs to be calculated only if deg is greater than num_picks // in weighted rowwise sampling without replacement if (deg > num_picks) { const int64_t ares_row_start = ares_ptr[out_row]; for (int64_t idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) { const int64_t in_idx = in_row_start + idx; const int64_t ares_idx = ares_row_start + idx; FloatType item_prob; _DoubleSlice<IdType, FloatType>(prob, data, idx, in_row_start, &item_prob); // compute A-Res value ares[ares_idx] = static_cast<FloatType>(__powf(hiprand_uniform(&rng), 1.0f / item_prob)); ares_idxs[ares_idx] = static_cast<IdType>(in_idx); } } out_row += 1; } } /** * @brief Perform weighted row-wise sampling on a CSR matrix, and generate a COO matrix, * without replacement. After sorting, we select top-num_picks items. * * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param num_picks The number of non-zeros to pick per row. 
* @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_cols The columns array of the input CSR. * @param data The data array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param ares_ptr The offset to write each row to in the ares array. * @param sort_ares_idxs The sorted A-Res value corresponding index array, the index of input CSR. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). * @author pengqirong (OPPO) */ template<typename IdType, typename FloatType, int TILE_SIZE> __global__ void _CSRRowWiseSampleKernel( const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, const IdType * const in_cols, const IdType * const data, const IdType * const out_ptr, const IdType * const ares_ptr, const IdType * const sort_ares_idxs, IdType * const out_rows, IdType * const out_cols, IdType * const out_idxs) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t out_row_start = out_ptr[out_row]; const int64_t deg = in_ptr[row + 1] - in_row_start; if (deg > num_picks) { const int64_t ares_row_start = ares_ptr[out_row]; for (int64_t idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { // get in and out index, the in_idx is one of top num_picks A-Res value // corresponding index in input CSR. const int64_t out_idx = out_row_start + idx; const int64_t ares_idx = ares_row_start + idx; const int64_t in_idx = sort_ares_idxs[ares_idx]; // copy permutation over out_rows[out_idx] = static_cast<IdType>(row); out_cols[out_idx] = in_cols[in_idx]; out_idxs[out_idx] = static_cast<IdType>(data ? data[in_idx] : in_idx); } } else { for (int64_t idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) { // get in and out index const int64_t out_idx = out_row_start + idx; const int64_t in_idx = in_row_start + idx; // copy permutation over out_rows[out_idx] = static_cast<IdType>(row); out_cols[out_idx] = in_cols[in_idx]; out_idxs[out_idx] = static_cast<IdType>(data ? data[in_idx] : in_idx); } } out_row += 1; } } // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. template<typename FloatType> struct BlockPrefixCallbackOp { // Running prefix FloatType running_total; // Constructor __device__ BlockPrefixCallbackOp(FloatType running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. __device__ FloatType operator()(FloatType block_aggregate) { FloatType old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; /** * @brief Perform weighted row-wise sampling on a CSR matrix, and generate a COO matrix, * with replacement. We store the CDF (unnormalized) of all neighbors of a row * in global memory and use binary search to find inverse indices as selected items. * * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. 
* @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_cols The columns array of the input CSR. * @param data The data array of the input CSR. * @param prob The probability array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param cdf_ptr The offset of each cdf segment. * @param cdf The global buffer to store cdf segments. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). * @author pengqirong (OPPO) */ template<typename IdType, typename FloatType, int TILE_SIZE> __global__ void _CSRRowWiseSampleReplaceKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, const IdType * const in_cols, const IdType * const data, const FloatType * const prob, const IdType * const out_ptr, const IdType * const cdf_ptr, FloatType * const cdf, IdType * const out_rows, IdType * const out_cols, IdType * const out_idxs ) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); hiprandStatePhilox4_32_10_t rng; hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t out_row_start = out_ptr[out_row]; const int64_t cdf_row_start = cdf_ptr[out_row]; const int64_t deg = in_ptr[row + 1] - in_row_start; const FloatType MIN_THREAD_DATA = static_cast<FloatType>(0.0f); if (deg > 0) { // Specialize BlockScan for a 1D block of BLOCK_SIZE threads typedef hipcub::BlockScan<FloatType, BLOCK_SIZE> BlockScan; // Allocate shared memory for BlockScan __shared__ typename BlockScan::TempStorage temp_storage; // Initialize running total BlockPrefixCallbackOp<FloatType> prefix_op(MIN_THREAD_DATA); int64_t max_iter = (1 + (deg - 1) / BLOCK_SIZE) * BLOCK_SIZE; // Have the block iterate over segments of items for (int64_t idx = threadIdx.x; idx < max_iter; idx += BLOCK_SIZE) { // Load a segment of consecutive items that are blocked across threads FloatType thread_data; if (idx < deg) _DoubleSlice<IdType, FloatType>(prob, data, idx, in_row_start, &thread_data); else thread_data = MIN_THREAD_DATA; thread_data = max(thread_data, MIN_THREAD_DATA); // Collectively compute the block-wide inclusive prefix sum BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, prefix_op); __syncthreads(); // Store scanned items to cdf array if (idx < deg) { cdf[cdf_row_start + idx] = thread_data; } } __syncthreads(); for (int64_t idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { // get random value FloatType sum = cdf[cdf_row_start + deg - 1]; FloatType rand = static_cast<FloatType>(hiprand_uniform(&rng) * sum); // get the offset of the first value within cdf array which is greater than random value. 
int64_t item = cub::UpperBound<FloatType*, int64_t, FloatType>( &cdf[cdf_row_start], deg, rand); item = min(item, deg - 1); // get in and out index const int64_t in_idx = in_row_start + item; const int64_t out_idx = out_row_start + idx; // copy permutation over out_rows[out_idx] = static_cast<IdType>(row); out_cols[out_idx] = in_cols[in_idx]; out_idxs[out_idx] = static_cast<IdType>(data ? data[in_idx] : in_idx); } } out_row += 1; } } } // namespace /////////////////////////////// CSR /////////////////////////////// /** * @brief Perform weighted row-wise sampling on a CSR matrix, and generate a COO matrix. * Use CDF sampling algorithm for with replacement: * 1) Calculate the CDF of all neighbor's prob. * 2) For each [0, num_picks), generate a rand ~ U(0, 1). * Use binary search to find its index in the CDF array as a chosen item. * Use A-Res sampling algorithm for without replacement: * 1) For rows with deg > num_picks, calculate A-Res values for all neighbors. * 2) Sort the A-Res array and select top-num_picks as chosen items. * * @tparam XPU The device type used for matrices. * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @param mat The CSR matrix. * @param rows The set of rows to pick. * @param num_picks The number of non-zeros to pick per row. * @param prob The probability array of the input CSR. * @param replace Is replacement sampling? * @author pengqirong (OPPO), dlasalle and Xin from Nvidia. */ template <DLDeviceType XPU, typename IdType, typename FloatType> COOMatrix CSRRowWiseSampling(CSRMatrix mat, IdArray rows, int64_t num_picks, FloatArray prob, bool replace) { const auto& ctx = rows->ctx; auto device = runtime::DeviceAPI::Get(ctx); // TODO(dlasalle): Once the device api supports getting the stream from the // context, that should be used instead of the default stream here. hipStream_t stream = 0; const int64_t num_rows = rows->shape[0]; const IdType * const slice_rows = static_cast<const IdType*>(rows->data); IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); const IdType * const in_ptr = static_cast<const IdType*>(mat.indptr->data); const IdType * const in_cols = static_cast<const IdType*>(mat.indices->data); IdType* const out_rows = static_cast<IdType*>(picked_row->data); IdType* const out_cols = static_cast<IdType*>(picked_col->data); IdType* const out_idxs = static_cast<IdType*>(picked_idx->data); const IdType* const data = CSRHasData(mat) ? 
static_cast<IdType*>(mat.data->data) : nullptr; const FloatType* const prob_data = static_cast<const FloatType*>(prob->data); // compute degree // out_deg: the size of each row in the sampled matrix // temp_deg: the size of each row we will manipulate in sampling // 1) for w/o replacement: in degree if it's greater than num_picks else 0 // 2) for w/ replacement: in degree IdType * out_deg = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); IdType * temp_deg = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); if (replace) { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeReplaceKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg, temp_deg); } else { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg, temp_deg); } // fill temp_ptr IdType * temp_ptr = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1)*sizeof(IdType))); size_t prefix_temp_size = 0; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size, temp_deg, temp_ptr, num_rows + 1, stream)); void * prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size, temp_deg, temp_ptr, num_rows + 1, stream)); device->FreeWorkspace(ctx, prefix_temp); device->FreeWorkspace(ctx, temp_deg); // TODO(Xin): The copy here is too small, and the overhead of creating // cuda events cannot be ignored. Just use synchronized copy. IdType temp_len; device->CopyDataFromTo(temp_ptr, num_rows * sizeof(temp_len), &temp_len, 0, sizeof(temp_len), ctx, DGLContext{kDLCPU, 0}, mat.indptr->dtype, stream); device->StreamSync(ctx, stream); // fill out_ptr IdType * out_ptr = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows+1)*sizeof(IdType))); prefix_temp_size = 0; CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size, out_deg, out_ptr, num_rows+1, stream)); prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size, out_deg, out_ptr, num_rows+1, stream)); device->FreeWorkspace(ctx, prefix_temp); device->FreeWorkspace(ctx, out_deg); hipEvent_t copyEvent; CUDA_CALL(hipEventCreate(&copyEvent)); // TODO(dlasalle): use pinned memory to overlap with the actual sampling, and wait on // a cudaevent IdType new_len; device->CopyDataFromTo(out_ptr, num_rows * sizeof(new_len), &new_len, 0, sizeof(new_len), ctx, DGLContext{kDLCPU, 0}, mat.indptr->dtype, stream); CUDA_CALL(hipEventRecord(copyEvent, stream)); // allocate workspace // 1) for w/ replacement, it's a global buffer to store cdf segments (one segment for each row). // 2) for w/o replacement, it's used to store a-res segments (one segment for // each row with degree > num_picks) FloatType * temp = static_cast<FloatType*>( device->AllocWorkspace(ctx, temp_len * sizeof(FloatType))); const uint64_t rand_seed = RandomEngine::ThreadLocal()->RandInt(1000000000); // select edges // the number of rows each thread block will cover constexpr int TILE_SIZE = 128 / BLOCK_SIZE; if (replace) { // with replacement. 
const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRRowWiseSampleReplaceKernel<IdType, FloatType, TILE_SIZE>), grid, block, 0, stream, rand_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, prob_data, out_ptr, temp_ptr, temp, out_rows, out_cols, out_idxs); device->FreeWorkspace(ctx, temp); } else { // without replacement IdType* temp_idxs = static_cast<IdType*>( device->AllocWorkspace(ctx, (temp_len) * sizeof(IdType))); // Compute A-Res value. A-Res value needs to be calculated only if deg // is greater than num_picks in weighted rowwise sampling without replacement. const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRAResValueKernel<IdType, FloatType, TILE_SIZE>), grid, block, 0, stream, rand_seed, num_picks, num_rows, slice_rows, in_ptr, data, prob_data, temp_ptr, temp_idxs, temp); // sort A-Res value array. FloatType* sort_temp = static_cast<FloatType*>( device->AllocWorkspace(ctx, temp_len * sizeof(FloatType))); IdType* sort_temp_idxs = static_cast<IdType*>( device->AllocWorkspace(ctx, temp_len * sizeof(IdType))); cub::DoubleBuffer<FloatType> sort_keys(temp, sort_temp); cub::DoubleBuffer<IdType> sort_values(temp_idxs, sort_temp_idxs); void *d_temp_storage = nullptr; size_t temp_storage_bytes = 0; CUDA_CALL(cub::DeviceSegmentedSort::SortPairsDescending( d_temp_storage, temp_storage_bytes, sort_keys, sort_values, temp_len, num_rows, temp_ptr, temp_ptr + 1)); d_temp_storage = device->AllocWorkspace(ctx, temp_storage_bytes); CUDA_CALL(cub::DeviceSegmentedSort::SortPairsDescending( d_temp_storage, temp_storage_bytes, sort_keys, sort_values, temp_len, num_rows, temp_ptr, temp_ptr + 1)); device->FreeWorkspace(ctx, d_temp_storage); device->FreeWorkspace(ctx, temp); device->FreeWorkspace(ctx, temp_idxs); device->FreeWorkspace(ctx, sort_temp); device->FreeWorkspace(ctx, sort_temp_idxs); // select tok-num_picks as results CUDA_KERNEL_CALL( (_CSRRowWiseSampleKernel<IdType, FloatType, TILE_SIZE>), grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, out_ptr, temp_ptr, sort_values.Current(), out_rows, out_cols, out_idxs); } device->FreeWorkspace(ctx, temp_ptr); device->FreeWorkspace(ctx, out_ptr); // wait for copying `new_len` to finish CUDA_CALL(hipEventSynchronize(copyEvent)); CUDA_CALL(hipEventDestroy(copyEvent)); picked_row = picked_row.CreateView({new_len}, picked_row->dtype); picked_col = picked_col.CreateView({new_len}, picked_col->dtype); picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype); return COOMatrix(mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx); } template COOMatrix CSRRowWiseSampling<kDLGPU, int32_t, float>( CSRMatrix, IdArray, int64_t, FloatArray, bool); template COOMatrix CSRRowWiseSampling<kDLGPU, int64_t, float>( CSRMatrix, IdArray, int64_t, FloatArray, bool); template COOMatrix CSRRowWiseSampling<kDLGPU, int32_t, double>( CSRMatrix, IdArray, int64_t, FloatArray, bool); template COOMatrix CSRRowWiseSampling<kDLGPU, int64_t, double>( CSRMatrix, IdArray, int64_t, FloatArray, bool); } // namespace impl } // namespace aten } // namespace dgl
4b2abc2110bb750a4fb7ba045235f28289e0f4db.cu
/*! * Copyright (c) 2022 by Contributors * \file array/cuda/rowwise_sampling_prob.cu * \brief weighted rowwise sampling. The degree computing kernels and * host-side functions are partially borrowed from the uniform rowwise * sampling code rowwise_sampling.cu. * \author pengqirong (OPPO), dlasalle and Xin from Nvidia. */ #include <dgl/random.h> #include <dgl/runtime/device_api.h> #include <curand_kernel.h> #include <numeric> #include "./dgl_cub.cuh" #include "../../array/cuda/atomic.cuh" #include "../../runtime/cuda/cuda_common.h" // require CUB 1.17 to use DeviceSegmentedSort static_assert(CUB_VERSION >= 101700, "Require CUB >= 1.17 to use DeviceSegmentedSort"); using namespace dgl::aten::cuda; namespace dgl { namespace aten { namespace impl { namespace { constexpr int BLOCK_SIZE = 128; /** * @brief Compute the size of each row in the sampled CSR, without replacement. * temp_deg is calculated for rows with deg > num_picks. * For these rows, we will calculate their A-Res values and sort them to get top-num_picks. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by `in_rows` (output). * @param temp_deg The size of each row in the input matrix, as indexed by `in_rows` (output). */ template<typename IdType> __global__ void _CSRRowWiseSampleDegreeKernel( const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, IdType * const out_deg, IdType * const temp_deg) { const int64_t tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int64_t in_row = in_rows[tIdx]; const int64_t out_row = tIdx; const IdType deg = in_ptr[in_row + 1] - in_ptr[in_row]; // temp_deg is used to generate ares_ptr temp_deg[out_row] = deg > static_cast<IdType>(num_picks) ? deg : 0; out_deg[out_row] = min(static_cast<IdType>(num_picks), deg); if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; temp_deg[num_rows] = 0; } } } /** * @brief Compute the size of each row in the sampled CSR, with replacement. * We need the actual in degree of each row to store CDF values. * * @tparam IdType The type of node and edge indexes. * @param num_picks The number of non-zero entries to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The index where each row's edges start. * @param out_deg The size of each row in the sampled matrix, as indexed by `in_rows` (output). * @param temp_deg The size of each row in the input matrix, as indexed by `in_rows` (output). */ template<typename IdType> __global__ void _CSRRowWiseSampleDegreeReplaceKernel( const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, IdType * const out_deg, IdType * const temp_deg) { const int64_t tIdx = threadIdx.x + blockIdx.x * blockDim.x; if (tIdx < num_rows) { const int64_t in_row = in_rows[tIdx]; const int64_t out_row = tIdx; const IdType deg = in_ptr[in_row + 1] - in_ptr[in_row]; temp_deg[out_row] = deg; out_deg[out_row] = deg == 0 ? 
0 : static_cast<IdType>(num_picks); if (out_row == num_rows - 1) { // make the prefixsum work out_deg[num_rows] = 0; temp_deg[num_rows] = 0; } } } /** * @brief Equivalent to numpy expression: array[idx[off:off + len]] * * @tparam IdType The ID type used for indices. * @tparam FloatType The float type used for array values. * @param array The array to be selected. * @param idx_data The index mapping array. * @param index The index of value to be selected. * @param offset The offset to start. * @param out The selected value (output). */ template<typename IdType, typename FloatType> __device__ void _DoubleSlice( const FloatType * const array, const IdType * const idx_data, const IdType idx, const IdType offset, FloatType* const out) { if (idx_data) { *out = array[idx_data[offset + idx]]; } else { *out = array[offset + idx]; } } /** * @brief Compute A-Res value. A-Res value needs to be calculated only if deg * is greater than num_picks in weighted rowwise sampling without replacement. * * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param data The data array of the input CSR. * @param prob The probability array of the input CSR. * @param ares_ptr The offset to write each row to in the A-res array. * @param ares_idxs The A-Res value corresponding index array, the index of input CSR (output). * @param ares The A-Res value array (output). * @author pengqirong (OPPO) */ template<typename IdType, typename FloatType, int TILE_SIZE> __global__ void _CSRAResValueKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, const IdType * const data, const FloatType * const prob, const IdType * const ares_ptr, IdType * const ares_idxs, FloatType * const ares) { int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); curandStatePhilox4_32_10_t rng; curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t deg = in_ptr[row + 1] - in_row_start; // A-Res value needs to be calculated only if deg is greater than num_picks // in weighted rowwise sampling without replacement if (deg > num_picks) { const int64_t ares_row_start = ares_ptr[out_row]; for (int64_t idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) { const int64_t in_idx = in_row_start + idx; const int64_t ares_idx = ares_row_start + idx; FloatType item_prob; _DoubleSlice<IdType, FloatType>(prob, data, idx, in_row_start, &item_prob); // compute A-Res value ares[ares_idx] = static_cast<FloatType>(__powf(curand_uniform(&rng), 1.0f / item_prob)); ares_idxs[ares_idx] = static_cast<IdType>(in_idx); } } out_row += 1; } } /** * @brief Perform weighted row-wise sampling on a CSR matrix, and generate a COO matrix, * without replacement. After sorting, we select top-num_picks items. * * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. * @param num_picks The number of non-zeros to pick per row. 
* @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_cols The columns array of the input CSR. * @param data The data array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param ares_ptr The offset to write each row to in the ares array. * @param sort_ares_idxs The sorted A-Res value corresponding index array, the index of input CSR. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). * @author pengqirong (OPPO) */ template<typename IdType, typename FloatType, int TILE_SIZE> __global__ void _CSRRowWiseSampleKernel( const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, const IdType * const in_cols, const IdType * const data, const IdType * const out_ptr, const IdType * const ares_ptr, const IdType * const sort_ares_idxs, IdType * const out_rows, IdType * const out_cols, IdType * const out_idxs) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t out_row_start = out_ptr[out_row]; const int64_t deg = in_ptr[row + 1] - in_row_start; if (deg > num_picks) { const int64_t ares_row_start = ares_ptr[out_row]; for (int64_t idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { // get in and out index, the in_idx is one of top num_picks A-Res value // corresponding index in input CSR. const int64_t out_idx = out_row_start + idx; const int64_t ares_idx = ares_row_start + idx; const int64_t in_idx = sort_ares_idxs[ares_idx]; // copy permutation over out_rows[out_idx] = static_cast<IdType>(row); out_cols[out_idx] = in_cols[in_idx]; out_idxs[out_idx] = static_cast<IdType>(data ? data[in_idx] : in_idx); } } else { for (int64_t idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) { // get in and out index const int64_t out_idx = out_row_start + idx; const int64_t in_idx = in_row_start + idx; // copy permutation over out_rows[out_idx] = static_cast<IdType>(row); out_cols[out_idx] = in_cols[in_idx]; out_idxs[out_idx] = static_cast<IdType>(data ? data[in_idx] : in_idx); } } out_row += 1; } } // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. template<typename FloatType> struct BlockPrefixCallbackOp { // Running prefix FloatType running_total; // Constructor __device__ BlockPrefixCallbackOp(FloatType running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. __device__ FloatType operator()(FloatType block_aggregate) { FloatType old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; /** * @brief Perform weighted row-wise sampling on a CSR matrix, and generate a COO matrix, * with replacement. We store the CDF (unnormalized) of all neighbors of a row * in global memory and use binary search to find inverse indices as selected items. * * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @tparam TILE_SIZE The number of rows covered by each threadblock. 
* @param rand_seed The random seed to use. * @param num_picks The number of non-zeros to pick per row. * @param num_rows The number of rows to pick. * @param in_rows The set of rows to pick. * @param in_ptr The indptr array of the input CSR. * @param in_cols The columns array of the input CSR. * @param data The data array of the input CSR. * @param prob The probability array of the input CSR. * @param out_ptr The offset to write each row to in the output COO. * @param cdf_ptr The offset of each cdf segment. * @param cdf The global buffer to store cdf segments. * @param out_rows The rows of the output COO (output). * @param out_cols The columns of the output COO (output). * @param out_idxs The data array of the output COO (output). * @author pengqirong (OPPO) */ template<typename IdType, typename FloatType, int TILE_SIZE> __global__ void _CSRRowWiseSampleReplaceKernel( const uint64_t rand_seed, const int64_t num_picks, const int64_t num_rows, const IdType * const in_rows, const IdType * const in_ptr, const IdType * const in_cols, const IdType * const data, const FloatType * const prob, const IdType * const out_ptr, const IdType * const cdf_ptr, FloatType * const cdf, IdType * const out_rows, IdType * const out_cols, IdType * const out_idxs ) { // we assign one warp per row assert(blockDim.x == BLOCK_SIZE); int64_t out_row = blockIdx.x * TILE_SIZE; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); curandStatePhilox4_32_10_t rng; curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng); while (out_row < last_row) { const int64_t row = in_rows[out_row]; const int64_t in_row_start = in_ptr[row]; const int64_t out_row_start = out_ptr[out_row]; const int64_t cdf_row_start = cdf_ptr[out_row]; const int64_t deg = in_ptr[row + 1] - in_row_start; const FloatType MIN_THREAD_DATA = static_cast<FloatType>(0.0f); if (deg > 0) { // Specialize BlockScan for a 1D block of BLOCK_SIZE threads typedef cub::BlockScan<FloatType, BLOCK_SIZE> BlockScan; // Allocate shared memory for BlockScan __shared__ typename BlockScan::TempStorage temp_storage; // Initialize running total BlockPrefixCallbackOp<FloatType> prefix_op(MIN_THREAD_DATA); int64_t max_iter = (1 + (deg - 1) / BLOCK_SIZE) * BLOCK_SIZE; // Have the block iterate over segments of items for (int64_t idx = threadIdx.x; idx < max_iter; idx += BLOCK_SIZE) { // Load a segment of consecutive items that are blocked across threads FloatType thread_data; if (idx < deg) _DoubleSlice<IdType, FloatType>(prob, data, idx, in_row_start, &thread_data); else thread_data = MIN_THREAD_DATA; thread_data = max(thread_data, MIN_THREAD_DATA); // Collectively compute the block-wide inclusive prefix sum BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, prefix_op); __syncthreads(); // Store scanned items to cdf array if (idx < deg) { cdf[cdf_row_start + idx] = thread_data; } } __syncthreads(); for (int64_t idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) { // get random value FloatType sum = cdf[cdf_row_start + deg - 1]; FloatType rand = static_cast<FloatType>(curand_uniform(&rng) * sum); // get the offset of the first value within cdf array which is greater than random value. 
int64_t item = cub::UpperBound<FloatType*, int64_t, FloatType>( &cdf[cdf_row_start], deg, rand); item = min(item, deg - 1); // get in and out index const int64_t in_idx = in_row_start + item; const int64_t out_idx = out_row_start + idx; // copy permutation over out_rows[out_idx] = static_cast<IdType>(row); out_cols[out_idx] = in_cols[in_idx]; out_idxs[out_idx] = static_cast<IdType>(data ? data[in_idx] : in_idx); } } out_row += 1; } } } // namespace /////////////////////////////// CSR /////////////////////////////// /** * @brief Perform weighted row-wise sampling on a CSR matrix, and generate a COO matrix. * Use CDF sampling algorithm for with replacement: * 1) Calculate the CDF of all neighbor's prob. * 2) For each [0, num_picks), generate a rand ~ U(0, 1). * Use binary search to find its index in the CDF array as a chosen item. * Use A-Res sampling algorithm for without replacement: * 1) For rows with deg > num_picks, calculate A-Res values for all neighbors. * 2) Sort the A-Res array and select top-num_picks as chosen items. * * @tparam XPU The device type used for matrices. * @tparam IdType The ID type used for matrices. * @tparam FloatType The Float type used for matrices. * @param mat The CSR matrix. * @param rows The set of rows to pick. * @param num_picks The number of non-zeros to pick per row. * @param prob The probability array of the input CSR. * @param replace Is replacement sampling? * @author pengqirong (OPPO), dlasalle and Xin from Nvidia. */ template <DLDeviceType XPU, typename IdType, typename FloatType> COOMatrix CSRRowWiseSampling(CSRMatrix mat, IdArray rows, int64_t num_picks, FloatArray prob, bool replace) { const auto& ctx = rows->ctx; auto device = runtime::DeviceAPI::Get(ctx); // TODO(dlasalle): Once the device api supports getting the stream from the // context, that should be used instead of the default stream here. cudaStream_t stream = 0; const int64_t num_rows = rows->shape[0]; const IdType * const slice_rows = static_cast<const IdType*>(rows->data); IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8); const IdType * const in_ptr = static_cast<const IdType*>(mat.indptr->data); const IdType * const in_cols = static_cast<const IdType*>(mat.indices->data); IdType* const out_rows = static_cast<IdType*>(picked_row->data); IdType* const out_cols = static_cast<IdType*>(picked_col->data); IdType* const out_idxs = static_cast<IdType*>(picked_idx->data); const IdType* const data = CSRHasData(mat) ? 
static_cast<IdType*>(mat.data->data) : nullptr; const FloatType* const prob_data = static_cast<const FloatType*>(prob->data); // compute degree // out_deg: the size of each row in the sampled matrix // temp_deg: the size of each row we will manipulate in sampling // 1) for w/o replacement: in degree if it's greater than num_picks else 0 // 2) for w/ replacement: in degree IdType * out_deg = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); IdType * temp_deg = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType))); if (replace) { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeReplaceKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg, temp_deg); } else { const dim3 block(512); const dim3 grid((num_rows + block.x - 1) / block.x); CUDA_KERNEL_CALL( _CSRRowWiseSampleDegreeKernel, grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, out_deg, temp_deg); } // fill temp_ptr IdType * temp_ptr = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows + 1)*sizeof(IdType))); size_t prefix_temp_size = 0; CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size, temp_deg, temp_ptr, num_rows + 1, stream)); void * prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size, temp_deg, temp_ptr, num_rows + 1, stream)); device->FreeWorkspace(ctx, prefix_temp); device->FreeWorkspace(ctx, temp_deg); // TODO(Xin): The copy here is too small, and the overhead of creating // cuda events cannot be ignored. Just use synchronized copy. IdType temp_len; device->CopyDataFromTo(temp_ptr, num_rows * sizeof(temp_len), &temp_len, 0, sizeof(temp_len), ctx, DGLContext{kDLCPU, 0}, mat.indptr->dtype, stream); device->StreamSync(ctx, stream); // fill out_ptr IdType * out_ptr = static_cast<IdType*>( device->AllocWorkspace(ctx, (num_rows+1)*sizeof(IdType))); prefix_temp_size = 0; CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size, out_deg, out_ptr, num_rows+1, stream)); prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size, out_deg, out_ptr, num_rows+1, stream)); device->FreeWorkspace(ctx, prefix_temp); device->FreeWorkspace(ctx, out_deg); cudaEvent_t copyEvent; CUDA_CALL(cudaEventCreate(&copyEvent)); // TODO(dlasalle): use pinned memory to overlap with the actual sampling, and wait on // a cudaevent IdType new_len; device->CopyDataFromTo(out_ptr, num_rows * sizeof(new_len), &new_len, 0, sizeof(new_len), ctx, DGLContext{kDLCPU, 0}, mat.indptr->dtype, stream); CUDA_CALL(cudaEventRecord(copyEvent, stream)); // allocate workspace // 1) for w/ replacement, it's a global buffer to store cdf segments (one segment for each row). // 2) for w/o replacement, it's used to store a-res segments (one segment for // each row with degree > num_picks) FloatType * temp = static_cast<FloatType*>( device->AllocWorkspace(ctx, temp_len * sizeof(FloatType))); const uint64_t rand_seed = RandomEngine::ThreadLocal()->RandInt(1000000000); // select edges // the number of rows each thread block will cover constexpr int TILE_SIZE = 128 / BLOCK_SIZE; if (replace) { // with replacement. 
const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRRowWiseSampleReplaceKernel<IdType, FloatType, TILE_SIZE>), grid, block, 0, stream, rand_seed, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, prob_data, out_ptr, temp_ptr, temp, out_rows, out_cols, out_idxs); device->FreeWorkspace(ctx, temp); } else { // without replacement IdType* temp_idxs = static_cast<IdType*>( device->AllocWorkspace(ctx, (temp_len) * sizeof(IdType))); // Compute A-Res value. A-Res value needs to be calculated only if deg // is greater than num_picks in weighted rowwise sampling without replacement. const dim3 block(BLOCK_SIZE); const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE); CUDA_KERNEL_CALL( (_CSRAResValueKernel<IdType, FloatType, TILE_SIZE>), grid, block, 0, stream, rand_seed, num_picks, num_rows, slice_rows, in_ptr, data, prob_data, temp_ptr, temp_idxs, temp); // sort A-Res value array. FloatType* sort_temp = static_cast<FloatType*>( device->AllocWorkspace(ctx, temp_len * sizeof(FloatType))); IdType* sort_temp_idxs = static_cast<IdType*>( device->AllocWorkspace(ctx, temp_len * sizeof(IdType))); cub::DoubleBuffer<FloatType> sort_keys(temp, sort_temp); cub::DoubleBuffer<IdType> sort_values(temp_idxs, sort_temp_idxs); void *d_temp_storage = nullptr; size_t temp_storage_bytes = 0; CUDA_CALL(cub::DeviceSegmentedSort::SortPairsDescending( d_temp_storage, temp_storage_bytes, sort_keys, sort_values, temp_len, num_rows, temp_ptr, temp_ptr + 1)); d_temp_storage = device->AllocWorkspace(ctx, temp_storage_bytes); CUDA_CALL(cub::DeviceSegmentedSort::SortPairsDescending( d_temp_storage, temp_storage_bytes, sort_keys, sort_values, temp_len, num_rows, temp_ptr, temp_ptr + 1)); device->FreeWorkspace(ctx, d_temp_storage); device->FreeWorkspace(ctx, temp); device->FreeWorkspace(ctx, temp_idxs); device->FreeWorkspace(ctx, sort_temp); device->FreeWorkspace(ctx, sort_temp_idxs); // select tok-num_picks as results CUDA_KERNEL_CALL( (_CSRRowWiseSampleKernel<IdType, FloatType, TILE_SIZE>), grid, block, 0, stream, num_picks, num_rows, slice_rows, in_ptr, in_cols, data, out_ptr, temp_ptr, sort_values.Current(), out_rows, out_cols, out_idxs); } device->FreeWorkspace(ctx, temp_ptr); device->FreeWorkspace(ctx, out_ptr); // wait for copying `new_len` to finish CUDA_CALL(cudaEventSynchronize(copyEvent)); CUDA_CALL(cudaEventDestroy(copyEvent)); picked_row = picked_row.CreateView({new_len}, picked_row->dtype); picked_col = picked_col.CreateView({new_len}, picked_col->dtype); picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype); return COOMatrix(mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx); } template COOMatrix CSRRowWiseSampling<kDLGPU, int32_t, float>( CSRMatrix, IdArray, int64_t, FloatArray, bool); template COOMatrix CSRRowWiseSampling<kDLGPU, int64_t, float>( CSRMatrix, IdArray, int64_t, FloatArray, bool); template COOMatrix CSRRowWiseSampling<kDLGPU, int32_t, double>( CSRMatrix, IdArray, int64_t, FloatArray, bool); template COOMatrix CSRRowWiseSampling<kDLGPU, int64_t, double>( CSRMatrix, IdArray, int64_t, FloatArray, bool); } // namespace impl } // namespace aten } // namespace dgl
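The rowwise_sampling_prob pair above samples without replacement using A-Res, exactly as its comments describe: each candidate edge gets the key u^(1/w) for u ~ U(0, 1) (the kernel's __powf(curand_uniform(&rng), 1.0f / item_prob)), the keys are sorted descending per row, and the top num_picks are kept. Below is a small NumPy sketch of that selection rule for a single row, for orientation only; it assumes a dense vector of edge weights and is not the DGL implementation.

import numpy as np

def ares_sample_row(weights, num_picks, rng=None):
    # Weighted sampling without replacement for one row:
    # each candidate i gets the key u_i ** (1 / w_i) with u_i ~ U(0, 1),
    # and the num_picks largest keys win (A-Res / Efraimidis-Spirakis).
    rng = rng or np.random.default_rng()
    weights = np.asarray(weights, dtype=float)
    if len(weights) <= num_picks:
        return np.arange(len(weights))    # degree <= num_picks: keep every neighbour
    keys = rng.random(len(weights)) ** (1.0 / weights)
    return np.argsort(-keys)[:num_picks]  # indices of the top-num_picks keys

# Example: pick 3 of 6 neighbours with unnormalised edge weights.
print(ares_sample_row([0.1, 2.0, 0.5, 1.0, 3.0, 0.2], num_picks=3))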
cdb74f9285d8da64e0857848314bdd1aacb4858d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void metropolisPoposal2 ( const int dim, const int nwl, const int isb, const float *xx, const float *rr, float *xx1 ) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int t = i + j * dim; if ( i < dim && j < nwl ) { xx1[t] = xx[t] + ( i == isb ) * rr[j]; } }
cdb74f9285d8da64e0857848314bdd1aacb4858d.cu
#include "includes.h" __global__ void metropolisPoposal2 ( const int dim, const int nwl, const int isb, const float *xx, const float *rr, float *xx1 ) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int t = i + j * dim; if ( i < dim && j < nwl ) { xx1[t] = xx[t] + ( i == isb ) * rr[j]; } }
9064a176c885652accedc514c58ee01eec5c27a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" __global__ void tanh_float(int n,int idx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = tanh(dy[i]); } }
9064a176c885652accedc514c58ee01eec5c27a3.cu
#include "includes.h" extern "C" __global__ void tanh_float(int n,int idx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = tanh(dy[i]); } }
689464097fa632910ff107fb5e9323bc926ab78e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CudaRTIter.h" #include "CudaSphere.h" #define MAX_TREE_SIZE 31 __device__ unsigned GetCurrentIndex(unsigned width, unsigned height) { unsigned x = blockIdx.x*blockDim.x + threadIdx.x; unsigned y = blockIdx.y*blockDim.y + threadIdx.y; return x + width * y; } __device__ float3 Trace(unsigned currInd, unsigned krInd, unsigned ktInd, float3* directions, float3* bases, float3* coefficients, unsigned depth, CudaObject** objects, int objSize) { auto direction = directions[currInd]; auto base = bases[currInd]; auto coefficient = coefficients[currInd]; // Default to no secondary rays if (depth < MAX_RAY_DEPTH-1) { coefficients[krInd] = make_float3(0,0,0); coefficients[ktInd] = make_float3(0,0,0); } // Ray Tracing finished if (coefficient.x == 0 && coefficient.y == 0 && coefficient.z == 0) { return make_float3(0,0,0); } // Get Intersection float tnear = INFINITY; const CudaObject* object = GetClosestObject( base, direction, tnear, objects, objSize ); if (object == nullptr) { return make_float3(2,2,2)*coefficient; } float3 phit = base + direction * tnear; float3 nhit = object->CalculateHit(phit); nhit = normalize(nhit); float bias = 1e-4; bool inside = false; if (dot(direction, nhit) > 0) { nhit *= -1; inside = true; } float3 intersection = phit + nhit * bias; auto surfaceColor = make_float3(0,0,0); // Check reflection if ((object->transparency > 0 || object->reflection > 0) && depth < MAX_RAY_DEPTH-1) { float facingRatio = -1 * dot(direction, nhit); // change the mix value to tweak the effect float fresnelEffect = Mix(pow(1 - facingRatio, 3), 1, 0.1); // compute reflection direction float3 reflDir = direction - nhit * 2 * dot(direction, nhit); // Update reflection ray bases[krInd] = intersection; directions[krInd] = normalize(reflDir); coefficients[krInd] = object->surfaceColor * fresnelEffect * coefficient; // Check refraction if (object->transparency) { float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? 
float cosi = -1 * dot(nhit, direction); float k = 1 - eta * eta * (1 - cosi * cosi); float3 refrDir = direction * eta + nhit * (eta * cosi - sqrtf(k)); // Update refraction ray intersection = phit - nhit * bias; bases[ktInd] = intersection; directions[ktInd] = normalize(refrDir); coefficients[ktInd] = object->surfaceColor * (1-fresnelEffect) * coefficient * object->transparency; } return make_float3(0,0,0); } // Diffuse object, compute illumination for (unsigned i = 0; i < objSize; ++i) { if (objects[i]->emissionColor.x > 0) { // this is a light float3 transmission = make_float3(1,1,1); float3 lightDirection = -1 * objects[i]->CalculateHit(phit); lightDirection = normalize(lightDirection); // Check for obstruction for (unsigned j = 0; j < objSize; ++j) { if (i != j) { float t0, t1; if (objects[j]->Intersect(intersection, lightDirection, t0, t1)) { transmission = make_float3(0,0,0); break; } } } surfaceColor += object->surfaceColor * transmission * fmaxf(float(0), dot(nhit, lightDirection)) * objects[i]->emissionColor; } } // If diffuse object, stop ray tracing return (surfaceColor + object->emissionColor)*coefficient; } __global__ void Initialize(float3* coefficients, float3* directions, float3* bases, unsigned width, unsigned height) { unsigned x = blockIdx.x*blockDim.x + threadIdx.x; unsigned y = blockIdx.y*blockDim.y + threadIdx.y; unsigned i = GetCurrentIndex(width, height); // Directions float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30, aspectRatio = width / float(height); float angle = tanf(M_PI * 0.5 * fov / 180.); float xx = (2 * ((x + 0.5f) * invWidth) - 1) * angle * aspectRatio; float yy = (1 - 2 * ((y + 0.5f) * invHeight)) * angle; float3 rayDir = normalize(make_float3(xx, yy, -1)); directions[i] = rayDir; // Bases bases[i] = make_float3(0,0,0); // Coeficients coefficients[i] = make_float3(1,1,1); } __global__ void Render(float3* layers, unsigned depth, float3* coefficients, float3* directions, float3* bases, CudaObject** objects, int objSize, unsigned width, unsigned height) { unsigned i = GetCurrentIndex(width, height); unsigned start = powf(2, depth); unsigned end = start * 2 - 1; --start; for (int index = start; index < end; ++index) { unsigned currInd = i + width*height*index; unsigned krInd = i + width*height*(2*index+1); unsigned ktInd = i + width*height*(2*index+2); layers[currInd] = Trace( currInd, krInd, ktInd, directions, bases, coefficients, depth, objects, objSize ); } } __global__ void Reassemble(float3* layers, unsigned depth, unsigned width, unsigned height) { unsigned i = GetCurrentIndex(width, height); unsigned start = powf(2, depth); unsigned end = start * 2 - 1; --start; for (int index = start; index < end; ++index) { layers[i] += layers[i+width*height*index]; } } __global__ void AllocateIter(CudaObject** objects) { new (objects[0]) CudaSphere(make_float3(0, -10004, -20), 10000, make_float3(0.2, 0.2, 0.2), 0, 0); new (objects[1]) CudaSphere(make_float3(0, 0, -20), 4.0, make_float3(1.0, 0.32, 0.36), 1, 0.5); new (objects[2]) CudaSphere(make_float3(5, -1, -15), 2, make_float3(0.9, 0.76, 0.46), 1, 0.0); new (objects[3]) CudaSphere(make_float3(5, 0, -25), 3, make_float3(0.65, 0.77, 0.97), 1, 0.0); new (objects[4]) CudaSphere(make_float3(-5.5, 0, -15), 3, make_float3(0.9, 0.9, 0.9), 1, 0.0); new (objects[5]) CudaSphere(make_float3(0.0, 20, -30), 3, make_float3(0, 0, 0), 0, 0.0f, make_float3(3,3,3)); } CudaRTIter::CudaRTIter(unsigned width, unsigned height) :mWidth(width) ,mHeight(height) { checkCudaErrors( hipMalloc(&mLayers, 
MAX_TREE_SIZE*width*height*sizeof(float3)) ); checkCudaErrors( hipMalloc(&mDirections, MAX_TREE_SIZE*width*height*sizeof(float3)) ); checkCudaErrors( hipMalloc(&mBases, MAX_TREE_SIZE*width*height*sizeof(float3)) ); checkCudaErrors( hipMalloc(&mCoefficients, MAX_TREE_SIZE*width*height*sizeof(float3)) ); } CudaRTIter::~CudaRTIter() { // free CUDA memory checkCudaErrors(hipFree(mLayers)); checkCudaErrors(hipFree(mDirections)); checkCudaErrors(hipFree(mBases)); checkCudaErrors(hipFree(mCoefficients)); } void CudaRTIter::RenderWrapper(float3* image) { CudaObject** objects; int size = 6; checkCudaErrors(hipMallocManaged(&objects, size*sizeof(CudaObject*))); checkCudaErrors(hipMallocManaged((void**)&objects, size*sizeof(CudaObject*))); for (int i = 0; i < size; ++i) { checkCudaErrors(hipMallocManaged((void**)&objects[i], sizeof(CudaSphere))); } hipLaunchKernelGGL(( AllocateIter), dim3(1),dim3(1), 0, 0, objects); // dim3 is CUDA specific type, block and grid are required to schedule CUDA threads over streaming multiprocessors dim3 block(8, 8, 1); dim3 grid(mWidth / block.x, mHeight / block.y, 1); std::cout << "Initialize" << std::endl; // schedule threads on device and launch CUDA kernel from host hipLaunchKernelGGL(( Initialize), dim3(grid), dim3(block) , 0, 0, mCoefficients, mDirections, mBases, mWidth, mHeight); checkCudaErrors(hipDeviceSynchronize()); // Iterate MAX_RAY_DEPTH times std::cout << "Trace" << std::endl; for (unsigned depth = 0; depth < MAX_RAY_DEPTH; ++depth) { hipLaunchKernelGGL(( Render), dim3(grid), dim3(block) , 0, 0, mLayers, depth, mCoefficients, mDirections, mBases, objects, size, mWidth, mHeight); } checkCudaErrors(hipDeviceSynchronize()); // Reassemble image std::cout << "Assemble" << std::endl; for (unsigned depth = MAX_RAY_DEPTH - 1; depth > 0; --depth) { hipLaunchKernelGGL(( Reassemble), dim3(grid), dim3(block) , 0, 0, mLayers, depth, mWidth, mHeight); } checkCudaErrors(hipDeviceSynchronize()); // Copy results back std::cout << "Output" << std::endl; hipMemcpy( image, mLayers, mWidth*mHeight*sizeof(float3), hipMemcpyDeviceToHost ); // Free up memory for (int i = 0; i < size; ++i) { checkCudaErrors(hipFree(objects[i])); } checkCudaErrors(hipFree(objects)); }
689464097fa632910ff107fb5e9323bc926ab78e.cu
#include "CudaRTIter.h" #include "CudaSphere.h" #define MAX_TREE_SIZE 31 __device__ unsigned GetCurrentIndex(unsigned width, unsigned height) { unsigned x = blockIdx.x*blockDim.x + threadIdx.x; unsigned y = blockIdx.y*blockDim.y + threadIdx.y; return x + width * y; } __device__ float3 Trace(unsigned currInd, unsigned krInd, unsigned ktInd, float3* directions, float3* bases, float3* coefficients, unsigned depth, CudaObject** objects, int objSize) { auto direction = directions[currInd]; auto base = bases[currInd]; auto coefficient = coefficients[currInd]; // Default to no secondary rays if (depth < MAX_RAY_DEPTH-1) { coefficients[krInd] = make_float3(0,0,0); coefficients[ktInd] = make_float3(0,0,0); } // Ray Tracing finished if (coefficient.x == 0 && coefficient.y == 0 && coefficient.z == 0) { return make_float3(0,0,0); } // Get Intersection float tnear = INFINITY; const CudaObject* object = GetClosestObject( base, direction, tnear, objects, objSize ); if (object == nullptr) { return make_float3(2,2,2)*coefficient; } float3 phit = base + direction * tnear; float3 nhit = object->CalculateHit(phit); nhit = normalize(nhit); float bias = 1e-4; bool inside = false; if (dot(direction, nhit) > 0) { nhit *= -1; inside = true; } float3 intersection = phit + nhit * bias; auto surfaceColor = make_float3(0,0,0); // Check reflection if ((object->transparency > 0 || object->reflection > 0) && depth < MAX_RAY_DEPTH-1) { float facingRatio = -1 * dot(direction, nhit); // change the mix value to tweak the effect float fresnelEffect = Mix(pow(1 - facingRatio, 3), 1, 0.1); // compute reflection direction float3 reflDir = direction - nhit * 2 * dot(direction, nhit); // Update reflection ray bases[krInd] = intersection; directions[krInd] = normalize(reflDir); coefficients[krInd] = object->surfaceColor * fresnelEffect * coefficient; // Check refraction if (object->transparency) { float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? 
float cosi = -1 * dot(nhit, direction); float k = 1 - eta * eta * (1 - cosi * cosi); float3 refrDir = direction * eta + nhit * (eta * cosi - sqrtf(k)); // Update refraction ray intersection = phit - nhit * bias; bases[ktInd] = intersection; directions[ktInd] = normalize(refrDir); coefficients[ktInd] = object->surfaceColor * (1-fresnelEffect) * coefficient * object->transparency; } return make_float3(0,0,0); } // Diffuse object, compute illumination for (unsigned i = 0; i < objSize; ++i) { if (objects[i]->emissionColor.x > 0) { // this is a light float3 transmission = make_float3(1,1,1); float3 lightDirection = -1 * objects[i]->CalculateHit(phit); lightDirection = normalize(lightDirection); // Check for obstruction for (unsigned j = 0; j < objSize; ++j) { if (i != j) { float t0, t1; if (objects[j]->Intersect(intersection, lightDirection, t0, t1)) { transmission = make_float3(0,0,0); break; } } } surfaceColor += object->surfaceColor * transmission * fmaxf(float(0), dot(nhit, lightDirection)) * objects[i]->emissionColor; } } // If diffuse object, stop ray tracing return (surfaceColor + object->emissionColor)*coefficient; } __global__ void Initialize(float3* coefficients, float3* directions, float3* bases, unsigned width, unsigned height) { unsigned x = blockIdx.x*blockDim.x + threadIdx.x; unsigned y = blockIdx.y*blockDim.y + threadIdx.y; unsigned i = GetCurrentIndex(width, height); // Directions float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30, aspectRatio = width / float(height); float angle = tanf(M_PI * 0.5 * fov / 180.); float xx = (2 * ((x + 0.5f) * invWidth) - 1) * angle * aspectRatio; float yy = (1 - 2 * ((y + 0.5f) * invHeight)) * angle; float3 rayDir = normalize(make_float3(xx, yy, -1)); directions[i] = rayDir; // Bases bases[i] = make_float3(0,0,0); // Coeficients coefficients[i] = make_float3(1,1,1); } __global__ void Render(float3* layers, unsigned depth, float3* coefficients, float3* directions, float3* bases, CudaObject** objects, int objSize, unsigned width, unsigned height) { unsigned i = GetCurrentIndex(width, height); unsigned start = powf(2, depth); unsigned end = start * 2 - 1; --start; for (int index = start; index < end; ++index) { unsigned currInd = i + width*height*index; unsigned krInd = i + width*height*(2*index+1); unsigned ktInd = i + width*height*(2*index+2); layers[currInd] = Trace( currInd, krInd, ktInd, directions, bases, coefficients, depth, objects, objSize ); } } __global__ void Reassemble(float3* layers, unsigned depth, unsigned width, unsigned height) { unsigned i = GetCurrentIndex(width, height); unsigned start = powf(2, depth); unsigned end = start * 2 - 1; --start; for (int index = start; index < end; ++index) { layers[i] += layers[i+width*height*index]; } } __global__ void AllocateIter(CudaObject** objects) { new (objects[0]) CudaSphere(make_float3(0, -10004, -20), 10000, make_float3(0.2, 0.2, 0.2), 0, 0); new (objects[1]) CudaSphere(make_float3(0, 0, -20), 4.0, make_float3(1.0, 0.32, 0.36), 1, 0.5); new (objects[2]) CudaSphere(make_float3(5, -1, -15), 2, make_float3(0.9, 0.76, 0.46), 1, 0.0); new (objects[3]) CudaSphere(make_float3(5, 0, -25), 3, make_float3(0.65, 0.77, 0.97), 1, 0.0); new (objects[4]) CudaSphere(make_float3(-5.5, 0, -15), 3, make_float3(0.9, 0.9, 0.9), 1, 0.0); new (objects[5]) CudaSphere(make_float3(0.0, 20, -30), 3, make_float3(0, 0, 0), 0, 0.0f, make_float3(3,3,3)); } CudaRTIter::CudaRTIter(unsigned width, unsigned height) :mWidth(width) ,mHeight(height) { checkCudaErrors( cudaMalloc(&mLayers, 
MAX_TREE_SIZE*width*height*sizeof(float3)) ); checkCudaErrors( cudaMalloc(&mDirections, MAX_TREE_SIZE*width*height*sizeof(float3)) ); checkCudaErrors( cudaMalloc(&mBases, MAX_TREE_SIZE*width*height*sizeof(float3)) ); checkCudaErrors( cudaMalloc(&mCoefficients, MAX_TREE_SIZE*width*height*sizeof(float3)) ); } CudaRTIter::~CudaRTIter() { // free CUDA memory checkCudaErrors(cudaFree(mLayers)); checkCudaErrors(cudaFree(mDirections)); checkCudaErrors(cudaFree(mBases)); checkCudaErrors(cudaFree(mCoefficients)); } void CudaRTIter::RenderWrapper(float3* image) { CudaObject** objects; int size = 6; checkCudaErrors(cudaMallocManaged((void**)&objects, size*sizeof(CudaObject*))); for (int i = 0; i < size; ++i) { checkCudaErrors(cudaMallocManaged((void**)&objects[i], sizeof(CudaSphere))); } AllocateIter<<<1,1>>>(objects); // dim3 is CUDA specific type, block and grid are required to schedule CUDA threads over streaming multiprocessors dim3 block(8, 8, 1); dim3 grid(mWidth / block.x, mHeight / block.y, 1); std::cout << "Initialize" << std::endl; // schedule threads on device and launch CUDA kernel from host Initialize<<< grid, block >>>(mCoefficients, mDirections, mBases, mWidth, mHeight); checkCudaErrors(cudaDeviceSynchronize()); // Iterate MAX_RAY_DEPTH times std::cout << "Trace" << std::endl; for (unsigned depth = 0; depth < MAX_RAY_DEPTH; ++depth) { Render<<< grid, block >>>(mLayers, depth, mCoefficients, mDirections, mBases, objects, size, mWidth, mHeight); } checkCudaErrors(cudaDeviceSynchronize()); // Reassemble image std::cout << "Assemble" << std::endl; for (unsigned depth = MAX_RAY_DEPTH - 1; depth > 0; --depth) { Reassemble<<< grid, block >>>(mLayers, depth, mWidth, mHeight); } checkCudaErrors(cudaDeviceSynchronize()); // Copy results back std::cout << "Output" << std::endl; cudaMemcpy( image, mLayers, mWidth*mHeight*sizeof(float3), cudaMemcpyDeviceToHost ); // Free up memory for (int i = 0; i < size; ++i) { checkCudaErrors(cudaFree(objects[i])); } checkCudaErrors(cudaFree(objects)); }
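A minimal host-side driver for the CudaRTIter pair above, shown only as a sketch: main(), the 640x480 resolution, and the out.ppm writer are assumptions and appear in neither source file; only CudaRTIter, RenderWrapper, and the float3 image layout come from the code shown.

#include "CudaRTIter.h"
#include <cuda_runtime.h>   // float3
#include <algorithm>
#include <fstream>
#include <vector>

int main()
{
    const unsigned width = 640, height = 480;          // assumed resolution
    std::vector<float3> image(width * height);

    CudaRTIter tracer(width, height);
    tracer.RenderWrapper(image.data());                // fills width*height float3 pixels

    // Clamp to [0,1] and dump a binary PPM for quick inspection (assumed output format).
    std::ofstream ppm("out.ppm", std::ios::binary);
    ppm << "P6\n" << width << " " << height << "\n255\n";
    for (const float3& px : image) {
        ppm << (unsigned char)(std::min(px.x, 1.0f) * 255)
            << (unsigned char)(std::min(px.y, 1.0f) * 255)
            << (unsigned char)(std::min(px.z, 1.0f) * 255);
    }
    return 0;
}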
37e6487965f82aaf3ae9cc855dfb15644a224c1f.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "BondedGroupData.cuh" #include "ParticleData.cuh" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/scan.h> #include <thrust/sort.h> #pragma GCC diagnostic pop /*! \file BondedGroupData.cu \brief Implements the helper functions (GPU version) for updating the GPU bonded group tables */ namespace hoomd { template<unsigned int group_size, typename group_t> __global__ void gpu_count_groups_kernel(const unsigned int n_groups, const group_t* d_group_table, const unsigned int* d_rtag, unsigned int* d_scratch_idx, unsigned int* d_scratch_g, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= n_groups) return; group_t g = d_group_table[group_idx]; for (unsigned int i = 0; i < group_size; ++i) { unsigned int tag_i = g.tag[i]; unsigned int pidx_i = d_rtag[tag_i]; // detect incomplete groups if (pidx_i == NOT_LOCAL) atomicMax(d_condition, next_flag + 1 + group_idx); // write out group_idx to temporary array d_scratch_g[i * n_groups + group_idx] = group_idx; d_scratch_idx[i * n_groups + group_idx] = pidx_i; // atomically increment number of groups unsigned int n = 0; if (pidx_i != NOT_LOCAL) n = atomicInc(&d_n_groups[pidx_i], 0xffffffff); if (n >= max_n_groups) // set flag to indicate we need to grow the output array atomicMax(d_condition, next_flag); } } template<unsigned int group_size, typename group_t> __global__ void gpu_group_scatter_kernel(unsigned int n_scratch, const unsigned int* d_scratch_g, const unsigned int* d_scratch_idx, const unsigned int* d_offset, const group_t* d_members, const typeval_union* d_group_typeval, const unsigned int* d_rtag, group_t* d_pidx_group_table, unsigned int* d_pidx_gpos_table, unsigned int pidx_group_table_pitch, bool has_type_mapping) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_scratch) return; unsigned int pidx = d_scratch_idx[i]; unsigned int offset = d_offset[i] * pidx_group_table_pitch + pidx; // load group unsigned int group_idx = d_scratch_g[i]; group_t g = d_members[group_idx]; // construct compact group representation, excluding particle pidx group_t p; if (has_type_mapping) { // last element = group type p.idx[group_size - 1] = d_group_typeval[group_idx].type; } else { // last element = group index p.idx[group_size - 1] = group_idx; } unsigned int j = 0; // position in group unsigned int gpos = 0; for (unsigned int k = 0; k < group_size; ++k) { unsigned int tag_k = g.tag[k]; unsigned int pidx_k = d_rtag[tag_k]; if (pidx_k == pidx) { gpos = k; continue; } p.idx[j++] = pidx_k; } d_pidx_group_table[offset] = p; d_pidx_gpos_table[offset] = gpos; } template<unsigned int group_size, typename group_t> void gpu_update_group_table(const unsigned int n_groups, const unsigned int N, const group_t* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_t* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* 
d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc) { // construct scratch table by expanding the group table by particle index unsigned int block_size = 256; unsigned n_blocks = n_groups / block_size + 1; // reset number of groups hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int) * N); hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_count_groups_kernel<group_size>), dim3(n_blocks), dim3(block_size), 0, 0, n_groups, d_group_table, d_rtag, d_scratch_idx, d_scratch_g, d_n_groups, max_n_groups, d_condition, next_flag); // read back flag hipMemcpy(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost); if (!(flag >= next_flag) && n_groups) { // we are good, fill group table // sort groups by particle idx thrust::device_ptr<unsigned int> scratch_idx(d_scratch_idx); thrust::device_ptr<unsigned int> scratch_g(d_scratch_g); #ifdef __HIP_PLATFORM_HCC__ thrust::sort_by_key(thrust::hip::par(alloc), #else thrust::sort_by_key(thrust::hip::par(alloc), #endif scratch_idx, scratch_idx + group_size * n_groups, scratch_g); // perform a segmented scan of d_scratch_idx thrust::device_ptr<unsigned int> offsets(d_offsets); thrust::constant_iterator<unsigned int> const_it(1); #ifdef __HIP_PLATFORM_HCC__ thrust::exclusive_scan_by_key(thrust::hip::par(alloc), #else thrust::exclusive_scan_by_key(thrust::hip::par(alloc), #endif scratch_idx, scratch_idx + group_size * n_groups, const_it, offsets); // scatter groups to destinations block_size = 256; n_blocks = (group_size * n_groups) / block_size + 1; hipLaunchKernelGGL(gpu_group_scatter_kernel<group_size>, dim3(n_blocks), dim3(block_size), 0, 0, n_groups * group_size, d_scratch_g, d_scratch_idx, d_offsets, d_group_table, d_group_typeval, d_rtag, d_pidx_group_table, d_pidx_gpos_table, pidx_group_table_pitch, has_type_mapping); } } /* * Explicit template instantiations */ //! BondData template void gpu_update_group_table<2>(const unsigned int n_groups, const unsigned int N, const union group_storage<2>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<2>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); //! AngleData template void gpu_update_group_table<3>(const unsigned int n_groups, const unsigned int N, const union group_storage<3>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<3>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); //! 
DihedralData and ImproperData template void gpu_update_group_table<4>(const unsigned int n_groups, const unsigned int N, const union group_storage<4>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<4>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); //! MeshTriangleData template void gpu_update_group_table<6>(const unsigned int n_groups, const unsigned int N, const union group_storage<6>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<6>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); } // end namespace hoomd
37e6487965f82aaf3ae9cc855dfb15644a224c1f.cu
// Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "BondedGroupData.cuh" #include "ParticleData.cuh" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/scan.h> #include <thrust/sort.h> #pragma GCC diagnostic pop /*! \file BondedGroupData.cu \brief Implements the helper functions (GPU version) for updating the GPU bonded group tables */ namespace hoomd { template<unsigned int group_size, typename group_t> __global__ void gpu_count_groups_kernel(const unsigned int n_groups, const group_t* d_group_table, const unsigned int* d_rtag, unsigned int* d_scratch_idx, unsigned int* d_scratch_g, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag) { unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= n_groups) return; group_t g = d_group_table[group_idx]; for (unsigned int i = 0; i < group_size; ++i) { unsigned int tag_i = g.tag[i]; unsigned int pidx_i = d_rtag[tag_i]; // detect incomplete groups if (pidx_i == NOT_LOCAL) atomicMax(d_condition, next_flag + 1 + group_idx); // write out group_idx to temporary array d_scratch_g[i * n_groups + group_idx] = group_idx; d_scratch_idx[i * n_groups + group_idx] = pidx_i; // atomically increment number of groups unsigned int n = 0; if (pidx_i != NOT_LOCAL) n = atomicInc(&d_n_groups[pidx_i], 0xffffffff); if (n >= max_n_groups) // set flag to indicate we need to grow the output array atomicMax(d_condition, next_flag); } } template<unsigned int group_size, typename group_t> __global__ void gpu_group_scatter_kernel(unsigned int n_scratch, const unsigned int* d_scratch_g, const unsigned int* d_scratch_idx, const unsigned int* d_offset, const group_t* d_members, const typeval_union* d_group_typeval, const unsigned int* d_rtag, group_t* d_pidx_group_table, unsigned int* d_pidx_gpos_table, unsigned int pidx_group_table_pitch, bool has_type_mapping) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_scratch) return; unsigned int pidx = d_scratch_idx[i]; unsigned int offset = d_offset[i] * pidx_group_table_pitch + pidx; // load group unsigned int group_idx = d_scratch_g[i]; group_t g = d_members[group_idx]; // construct compact group representation, excluding particle pidx group_t p; if (has_type_mapping) { // last element = group type p.idx[group_size - 1] = d_group_typeval[group_idx].type; } else { // last element = group index p.idx[group_size - 1] = group_idx; } unsigned int j = 0; // position in group unsigned int gpos = 0; for (unsigned int k = 0; k < group_size; ++k) { unsigned int tag_k = g.tag[k]; unsigned int pidx_k = d_rtag[tag_k]; if (pidx_k == pidx) { gpos = k; continue; } p.idx[j++] = pidx_k; } d_pidx_group_table[offset] = p; d_pidx_gpos_table[offset] = gpos; } template<unsigned int group_size, typename group_t> void gpu_update_group_table(const unsigned int n_groups, const unsigned int N, const group_t* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_t* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, 
bool has_type_mapping, CachedAllocator& alloc) { // construct scratch table by expanding the group table by particle index unsigned int block_size = 256; unsigned n_blocks = n_groups / block_size + 1; // reset number of groups hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int) * N); hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_count_groups_kernel<group_size>), dim3(n_blocks), dim3(block_size), 0, 0, n_groups, d_group_table, d_rtag, d_scratch_idx, d_scratch_g, d_n_groups, max_n_groups, d_condition, next_flag); // read back flag hipMemcpy(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost); if (!(flag >= next_flag) && n_groups) { // we are good, fill group table // sort groups by particle idx thrust::device_ptr<unsigned int> scratch_idx(d_scratch_idx); thrust::device_ptr<unsigned int> scratch_g(d_scratch_g); #ifdef __HIP_PLATFORM_HCC__ thrust::sort_by_key(thrust::hip::par(alloc), #else thrust::sort_by_key(thrust::cuda::par(alloc), #endif scratch_idx, scratch_idx + group_size * n_groups, scratch_g); // perform a segmented scan of d_scratch_idx thrust::device_ptr<unsigned int> offsets(d_offsets); thrust::constant_iterator<unsigned int> const_it(1); #ifdef __HIP_PLATFORM_HCC__ thrust::exclusive_scan_by_key(thrust::hip::par(alloc), #else thrust::exclusive_scan_by_key(thrust::cuda::par(alloc), #endif scratch_idx, scratch_idx + group_size * n_groups, const_it, offsets); // scatter groups to destinations block_size = 256; n_blocks = (group_size * n_groups) / block_size + 1; hipLaunchKernelGGL(gpu_group_scatter_kernel<group_size>, dim3(n_blocks), dim3(block_size), 0, 0, n_groups * group_size, d_scratch_g, d_scratch_idx, d_offsets, d_group_table, d_group_typeval, d_rtag, d_pidx_group_table, d_pidx_gpos_table, pidx_group_table_pitch, has_type_mapping); } } /* * Explicit template instantiations */ //! BondData template void gpu_update_group_table<2>(const unsigned int n_groups, const unsigned int N, const union group_storage<2>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<2>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); //! AngleData template void gpu_update_group_table<3>(const unsigned int n_groups, const unsigned int N, const union group_storage<3>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<3>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); //! 
DihedralData and ImproperData template void gpu_update_group_table<4>(const unsigned int n_groups, const unsigned int N, const union group_storage<4>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<4>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); //! MeshTriangleData template void gpu_update_group_table<6>(const unsigned int n_groups, const unsigned int N, const union group_storage<6>* d_group_table, const typeval_union* d_group_typeval, const unsigned int* d_rtag, unsigned int* d_n_groups, unsigned int max_n_groups, unsigned int* d_condition, unsigned int next_flag, unsigned int& flag, group_storage<6>* d_pidx_group_table, unsigned int* d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int* d_scratch_g, unsigned int* d_scratch_idx, unsigned int* d_offsets, bool has_type_mapping, CachedAllocator& alloc); } // end namespace hoomd
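The sort-then-segmented-scan step inside gpu_update_group_table is what assigns each (particle, group) scratch entry a slot in the per-particle table. The toy program below reproduces just that Thrust pattern on invented data; it is an illustration of the trick, not HOOMD-blue code.

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <thrust/iterator/constant_iterator.h>
#include <cstdio>

int main()
{
    // Toy scratch table: (particle index, group index) pairs in arbitrary order.
    int h_pidx[]  = {2, 0, 2, 1, 0, 2};
    int h_group[] = {0, 0, 1, 1, 2, 2};
    thrust::device_vector<int> pidx(h_pidx, h_pidx + 6);
    thrust::device_vector<int> group(h_group, h_group + 6);
    thrust::device_vector<int> offset(6);

    // Pass 1: bring all entries of the same particle together.
    thrust::sort_by_key(pidx.begin(), pidx.end(), group.begin());

    // Pass 2: a segmented exclusive scan over ones gives each entry its slot
    // within that particle's row of the per-particle group table.
    thrust::exclusive_scan_by_key(pidx.begin(), pidx.end(),
                                  thrust::constant_iterator<int>(1),
                                  offset.begin());

    for (int i = 0; i < 6; ++i)
        std::printf("pidx %d group %d slot %d\n",
                    (int)pidx[i], (int)group[i], (int)offset[i]);
    return 0;
}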
80e862ee63fd484a6b4d0ef0c5fd77a140839423.hip
// !!! This is a file automatically generated by hipify!!! #include "cudaASPLgraph.hpp" #include "hip/hip_runtime.h" #include <bitset> namespace graphgolf{ ///<<<N,32>>? __global__ void kernel_aspl_init_bits(uint *bits, int N, int offset){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id>=N<<5) return; int v=(id>>5)-offset; int b=id&0x1F; int tmp; int M=(N-offset<1024?N-offset:1024); if(M>=(b+1)<<5){ tmp=0; }else if(b<<5>=M){ tmp=0xFFFFFFFF; }else{ tmp=~((1<<(M-(b<<5)))-1); } if((b<<5)<=v&&v<((b+1)<<5)){ tmp|=1<<(v-(b<<5)); } // sum[id]=0; bits[id]=tmp; } __global__ void kernel_aspl_conv(uint *bits, uint *diff_bits, int *edges, int N, int degree){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id>=N<<5) return; int v=id>>5; int b=id&0x1F; uint tmp=0; for(int i=0;i<degree;i++){ int to = edges[v*degree+i]; tmp|=bits[(to<<5)+b]; } tmp&=~bits[id]; diff_bits[id]=tmp; } __global__ void kernel_aspl_apply(uint *bits, uint *diff_bits, int *sum, int N, int step){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id>=N<<5) return; uint tmp=diff_bits[id]; bits[id]|=tmp; sum[id]+=__popc(tmp)*step; } //1024 -> 1 __global__ void kernel_aspl_reduce_plus(int *sum, int64_t *ret, int length){ __shared__ int64_t tmp[32]; int id = blockIdx.x*blockDim.x+threadIdx.x; int64_t elem = id<length?sum[id]:0; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem+=__shfl_down_sync(0xFFFFFFFF, elem, delta); } int laneid=threadIdx.x&0x1F; int warpid=threadIdx.x>>5; if(laneid==0) tmp[warpid]=elem; __syncthreads(); if(warpid) return; elem=tmp[laneid]; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem+=__shfl_down_sync(0xFFFFFFFF, elem, delta); } if(threadIdx.x==0) ret[blockIdx.x]=elem; } __global__ void kernel_aspl_reduce_OR(uint *bits, uint *ret, int length){ __shared__ uint tmp[32]; int id = blockIdx.x*blockDim.x+threadIdx.x; uint elem = id<length?bits[id]:0; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem|=__shfl_down_sync(0xFFFFFFFF, elem, delta); } int laneid=threadIdx.x&0x1F; int warpid=threadIdx.x>>5; if(laneid==0) tmp[warpid]=elem; __syncthreads(); if(warpid) return; elem=tmp[laneid]; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem|=__shfl_down_sync(0xFFFFFFFF, elem, delta); } if(threadIdx.x==0) ret[blockIdx.x]=elem; } __global__ void kernel_aspl_reduce_AND(uint *bits, uint *ret, int length){ __shared__ uint tmp[32]; int id = blockIdx.x*blockDim.x+threadIdx.x; uint elem = id<length?bits[id]:0xFFFFFFFF; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem&=__shfl_down_sync(0xFFFFFFFF, elem, delta); } int laneid=threadIdx.x&0x1F; int warpid=threadIdx.x>>5; if(laneid==0) tmp[warpid]=elem; __syncthreads(); if(warpid) return; elem=tmp[laneid]; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem&=__shfl_down_sync(0xFFFFFFFF, elem, delta); } if(threadIdx.x==0) ret[blockIdx.x]=elem; } cudaASPLgraph::cudaASPLgraph(int N, int degree_max, int device): N(N), degree_max(degree_max), device(device){ nBlock = (N+31)/32;//32 hipSetDevice(device); hipMalloc((void**)&d_bits,N*32*sizeof(uint)); hipMalloc((void**)&d_diff_bits,N*32*sizeof(uint)); hipMalloc((void**)&d_sum,N*32*sizeof(int)); hipHostMalloc((void**)&h_bits,N*32*sizeof(uint)); hipMalloc((void**)&d_edges,N*degree_max*sizeof(int)); hipHostMalloc((void**)&h_edges,N*degree_max*sizeof(int)); hipMalloc((void**)&d_ret,nBlock*sizeof(int64_t)); hipHostMalloc((void**)&h_ret,nBlock*sizeof(int64_t)); hipMalloc((void**)&d_ret_bits,nBlock*sizeof(uint)); hipHostMalloc((void**)&h_ret_bits,nBlock*sizeof(uint)); } cudaASPLgraph::~cudaASPLgraph(){ 
hipSetDevice(device); hipFree(d_bits); hipFree(d_diff_bits); hipFree(d_sum); hipHostFree(h_bits); hipFree(d_edges); hipHostFree(h_edges); hipFree(d_ret); hipHostFree(h_ret); hipFree(d_ret_bits); hipHostFree(h_ret_bits); } std::pair<int,int64_t> cudaASPLgraph::calc(graph &g){//, total //std::cout<<"N: "<<N<<" degree_max: "<<degree_max<<" nBlock: "<<nBlock<<std::endl; hipSetDevice(device); for(int i=0;i<N;i++){ for(int j=0;j<degree_max;j++){ if(j<g.edges[i].size()){ h_edges[i*degree_max+j]=g.edges[i][j]; }else{ //() h_edges[i*degree_max+j]=i; } } } hipMemcpy(d_edges,h_edges,N*degree_max*sizeof(int),hipMemcpyHostToDevice); int diameter=0; int64_t total=0; hipMemset(d_sum,0,N*32*sizeof(int)); for(int offset=0;offset<N;offset+=1024){ if(offset)std::cout<<char(27)<<'['<<'F'<<char(27)<<'['<<'E'<<char(27)<<'['<<'K'<<std::flush; std::cout<<offset<<'/'<<N<<std::flush; hipLaunchKernelGGL(( kernel_aspl_init_bits), dim3(nBlock),dim3(1024), 0, 0, d_bits,N,offset); hipDeviceSynchronize(); for(int step=1;step<200;step++){ hipLaunchKernelGGL(( kernel_aspl_conv), dim3(nBlock),dim3(1024), 0, 0, d_bits,d_diff_bits,d_edges,N,degree_max); hipDeviceSynchronize(); hipLaunchKernelGGL(( kernel_aspl_apply), dim3(nBlock),dim3(1024), 0, 0, d_bits,d_diff_bits,d_sum,N,step); hipDeviceSynchronize(); hipLaunchKernelGGL(( kernel_aspl_reduce_OR), dim3(nBlock),dim3(1024), 0, 0, d_diff_bits,d_ret_bits,N<<5); hipDeviceSynchronize(); hipMemcpy(h_ret_bits,d_ret_bits,nBlock*sizeof(uint),hipMemcpyDeviceToHost); uint flag = 0; for(int i=0;i<nBlock;i++) flag|=h_ret_bits[i]; if(flag==0){ hipLaunchKernelGGL(( kernel_aspl_reduce_AND), dim3(nBlock),dim3(1024), 0, 0, d_bits,d_ret_bits,N<<5); hipDeviceSynchronize(); hipMemcpy(h_ret_bits,d_ret_bits,nBlock*sizeof(uint),hipMemcpyDeviceToHost); flag=0xFFFFFFFF; for(int i=0;i<nBlock;i++) flag&=h_ret_bits[i]; if(flag!=0xFFFFFFFF){ //Graph is unconnected std::cout<<"Graph is unconnected"<<std::endl; return std::make_pair(-1,-1); }else{ diameter=::max(diameter,step-1); break; } }else if(step+1==200){ //too large diameter std::cout<<"Too large diameter"<<std::endl; return std::make_pair(-2,-2); } } } std::cout<<char(27)<<'['<<'F'<<char(27)<<'['<<'E'<<char(27)<<'['<<'K'<<std::flush; hipLaunchKernelGGL(( kernel_aspl_reduce_plus), dim3(nBlock),dim3(1024), 0, 0, d_sum,d_ret,N<<5); hipMemcpy(h_ret,d_ret,nBlock*sizeof(int64_t),hipMemcpyDeviceToHost); for(int i=0;i<nBlock;i++) total+=h_ret[i]; return std::make_pair(diameter,total); } }
80e862ee63fd484a6b4d0ef0c5fd77a140839423.cu
#include "cudaASPLgraph.hpp" #include "cuda_runtime.h" #include <bitset> namespace graphgolf{ ///<<<N,32>>で呼び出す? __global__ void kernel_aspl_init_bits(uint *bits, int N, int offset){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id>=N<<5) return; int v=(id>>5)-offset; int b=id&0x1F; int tmp; int M=(N-offset<1024?N-offset:1024); if(M>=(b+1)<<5){ tmp=0; }else if(b<<5>=M){ tmp=0xFFFFFFFF; }else{ tmp=~((1<<(M-(b<<5)))-1); } if((b<<5)<=v&&v<((b+1)<<5)){ tmp|=1<<(v-(b<<5)); } // sum[id]=0; bits[id]=tmp; } __global__ void kernel_aspl_conv(uint *bits, uint *diff_bits, int *edges, int N, int degree){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id>=N<<5) return; int v=id>>5; int b=id&0x1F; uint tmp=0; for(int i=0;i<degree;i++){ int to = edges[v*degree+i]; tmp|=bits[(to<<5)+b]; } tmp&=~bits[id]; diff_bits[id]=tmp; } __global__ void kernel_aspl_apply(uint *bits, uint *diff_bits, int *sum, int N, int step){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id>=N<<5) return; uint tmp=diff_bits[id]; bits[id]|=tmp; sum[id]+=__popc(tmp)*step; } //1024 -> 1 __global__ void kernel_aspl_reduce_plus(int *sum, int64_t *ret, int length){ __shared__ int64_t tmp[32]; int id = blockIdx.x*blockDim.x+threadIdx.x; int64_t elem = id<length?sum[id]:0; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem+=__shfl_down_sync(0xFFFFFFFF, elem, delta); } int laneid=threadIdx.x&0x1F; int warpid=threadIdx.x>>5; if(laneid==0) tmp[warpid]=elem; __syncthreads(); if(warpid) return; elem=tmp[laneid]; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem+=__shfl_down_sync(0xFFFFFFFF, elem, delta); } if(threadIdx.x==0) ret[blockIdx.x]=elem; } __global__ void kernel_aspl_reduce_OR(uint *bits, uint *ret, int length){ __shared__ uint tmp[32]; int id = blockIdx.x*blockDim.x+threadIdx.x; uint elem = id<length?bits[id]:0; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem|=__shfl_down_sync(0xFFFFFFFF, elem, delta); } int laneid=threadIdx.x&0x1F; int warpid=threadIdx.x>>5; if(laneid==0) tmp[warpid]=elem; __syncthreads(); if(warpid) return; elem=tmp[laneid]; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem|=__shfl_down_sync(0xFFFFFFFF, elem, delta); } if(threadIdx.x==0) ret[blockIdx.x]=elem; } __global__ void kernel_aspl_reduce_AND(uint *bits, uint *ret, int length){ __shared__ uint tmp[32]; int id = blockIdx.x*blockDim.x+threadIdx.x; uint elem = id<length?bits[id]:0xFFFFFFFF; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem&=__shfl_down_sync(0xFFFFFFFF, elem, delta); } int laneid=threadIdx.x&0x1F; int warpid=threadIdx.x>>5; if(laneid==0) tmp[warpid]=elem; __syncthreads(); if(warpid) return; elem=tmp[laneid]; #pragma unroll 16 for(int delta=16;delta;delta>>=1){ elem&=__shfl_down_sync(0xFFFFFFFF, elem, delta); } if(threadIdx.x==0) ret[blockIdx.x]=elem; } cudaASPLgraph::cudaASPLgraph(int N, int degree_max, int device): N(N), degree_max(degree_max), device(device){ nBlock = (N+31)/32;//各ブロックに32頂点を押し込む cudaSetDevice(device); cudaMalloc((void**)&d_bits,N*32*sizeof(uint)); cudaMalloc((void**)&d_diff_bits,N*32*sizeof(uint)); cudaMalloc((void**)&d_sum,N*32*sizeof(int)); cudaMallocHost((void**)&h_bits,N*32*sizeof(uint)); cudaMalloc((void**)&d_edges,N*degree_max*sizeof(int)); cudaMallocHost((void**)&h_edges,N*degree_max*sizeof(int)); cudaMalloc((void**)&d_ret,nBlock*sizeof(int64_t)); cudaMallocHost((void**)&h_ret,nBlock*sizeof(int64_t)); cudaMalloc((void**)&d_ret_bits,nBlock*sizeof(uint)); cudaMallocHost((void**)&h_ret_bits,nBlock*sizeof(uint)); } cudaASPLgraph::~cudaASPLgraph(){ cudaSetDevice(device); 
cudaFree(d_bits); cudaFree(d_diff_bits); cudaFree(d_sum); cudaFreeHost(h_bits); cudaFree(d_edges); cudaFreeHost(h_edges); cudaFree(d_ret); cudaFreeHost(h_ret); cudaFree(d_ret_bits); cudaFreeHost(h_ret_bits); } std::pair<int,int64_t> cudaASPLgraph::calc(graph &g){//直径, totalを返す //std::cout<<"N: "<<N<<" degree_max: "<<degree_max<<" nBlock: "<<nBlock<<std::endl; cudaSetDevice(device); for(int i=0;i<N;i++){ for(int j=0;j<degree_max;j++){ if(j<g.edges[i].size()){ h_edges[i*degree_max+j]=g.edges[i][j]; }else{ //余った場所は自己ループ辺で埋める(あまり良くない) h_edges[i*degree_max+j]=i; } } } cudaMemcpy(d_edges,h_edges,N*degree_max*sizeof(int),cudaMemcpyHostToDevice); int diameter=0; int64_t total=0; cudaMemset(d_sum,0,N*32*sizeof(int)); for(int offset=0;offset<N;offset+=1024){ if(offset)std::cout<<char(27)<<'['<<'F'<<char(27)<<'['<<'E'<<char(27)<<'['<<'K'<<std::flush; std::cout<<offset<<'/'<<N<<std::flush; kernel_aspl_init_bits<<<nBlock,1024>>>(d_bits,N,offset); cudaDeviceSynchronize(); for(int step=1;step<200;step++){ kernel_aspl_conv<<<nBlock,1024>>>(d_bits,d_diff_bits,d_edges,N,degree_max); cudaDeviceSynchronize(); kernel_aspl_apply<<<nBlock,1024>>>(d_bits,d_diff_bits,d_sum,N,step); cudaDeviceSynchronize(); kernel_aspl_reduce_OR<<<nBlock,1024>>>(d_diff_bits,d_ret_bits,N<<5); cudaDeviceSynchronize(); cudaMemcpy(h_ret_bits,d_ret_bits,nBlock*sizeof(uint),cudaMemcpyDeviceToHost); uint flag = 0; for(int i=0;i<nBlock;i++) flag|=h_ret_bits[i]; if(flag==0){ kernel_aspl_reduce_AND<<<nBlock,1024>>>(d_bits,d_ret_bits,N<<5); cudaDeviceSynchronize(); cudaMemcpy(h_ret_bits,d_ret_bits,nBlock*sizeof(uint),cudaMemcpyDeviceToHost); flag=0xFFFFFFFF; for(int i=0;i<nBlock;i++) flag&=h_ret_bits[i]; if(flag!=0xFFFFFFFF){ //Graph is unconnected std::cout<<"Graph is unconnected"<<std::endl; return std::make_pair(-1,-1); }else{ diameter=std::max(diameter,step-1); break; } }else if(step+1==200){ //too large diameter std::cout<<"Too large diameter"<<std::endl; return std::make_pair(-2,-2); } } } std::cout<<char(27)<<'['<<'F'<<char(27)<<'['<<'E'<<char(27)<<'['<<'K'<<std::flush; kernel_aspl_reduce_plus<<<nBlock,1024>>>(d_sum,d_ret,N<<5); cudaMemcpy(h_ret,d_ret,nBlock*sizeof(int64_t),cudaMemcpyDeviceToHost); for(int i=0;i<nBlock;i++) total+=h_ret[i]; return std::make_pair(diameter,total); } }
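For reference, the bit-parallel BFS implemented by the kernel_aspl_* kernels can be written on the CPU in a few lines: every vertex keeps a bitmask of the source vertices that have already reached it, one sweep ORs in the neighbours' masks, and the newly set bits contribute step * popcount to the running distance total. The sketch below is a CPU illustration with a hard-coded 4-cycle and 64-bit masks (GCC/Clang __builtin_popcountll); it is not the class's GPU path and the graph is invented.

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<std::vector<int>> edges = {{1, 2}, {0, 3}, {0, 3}, {1, 2}};  // a 4-cycle
    const int N = (int)edges.size();
    std::vector<uint64_t> reached(N);
    for (int v = 0; v < N; ++v) reached[v] = 1ULL << v;   // each vertex reaches itself

    long long total = 0;
    int diameter = 0;
    for (int step = 1; step < N; ++step) {
        std::vector<uint64_t> next = reached;
        bool changed = false;
        for (int v = 0; v < N; ++v)
            for (int u : edges[v]) next[v] |= reached[u];  // OR in neighbours' masks
        for (int v = 0; v < N; ++v) {
            uint64_t diff = next[v] & ~reached[v];         // sources newly reaching v
            if (diff) { changed = true; diameter = step; }
            total += (long long)step * __builtin_popcountll(diff);
        }
        reached.swap(next);
        if (!changed) break;                               // converged: diameter found
    }
    std::printf("diameter %d total %lld\n", diameter, total);  // prints: diameter 2 total 16
    return 0;
}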
a68ef85b0821c5b12d3181582235385fbd6f4453.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/layers/accuracy_softmax_loss_layer.hpp" namespace caffe { template<typename Dtype> __global__ void accuracy_softmax_loss_forward_gpu(int threads,int area,int stage,const Dtype* data, const Dtype* label,Dtype* accur) { CUDA_KERNEL_LOOP(index,threads) { int batch_size=index/area; int offset=index%area; Dtype max=Dtype(0.); int k=0; int base=batch_size*area*stage; for(int i=0;i<stage;i++) { int pos=base+i*area+offset; if(max<data[pos]) { max=data[pos]; k=i; } } int s=label[index]>=stage?0:label[index]; accur[index]=Dtype(0.); if(k==s)accur[index]++; } } template<typename Dtype> void AccuracySoftMaxLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) { const Dtype* data=bottom[0]->gpu_data(); const Dtype* label=bottom[1]->gpu_data(); int stage=bottom[0]->shape(1); int area=bottom[0]->count(2); int count=bottom[1]->count(); Dtype* temp=NULL; hipError_t state=hipMalloc((void**)&temp,sizeof(Dtype)*count); if(state!=hipSuccess)LOG(FATAL)<<"hipMalloc memory errror."; hipLaunchKernelGGL(( accuracy_softmax_loss_forward_gpu), dim3(CAFFE_GET_BLOCKS(count)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, area,stage,data,label,temp); Dtype loss=Dtype(0.); caffe_gpu_asum(count,temp,&loss); top[0]->mutable_cpu_data()[0]=loss/count; hipFree(temp); } template<typename Dtype> void AccuracySoftMaxLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,const vector<bool>& propagete_down, const vector<Blob<Dtype>*>& bottom ) { //do-nothing } INSTANTIATE_LAYER_GPU_FUNCS(AccuracySoftMaxLossLayer); }
a68ef85b0821c5b12d3181582235385fbd6f4453.cu
#include "caffe/layers/accuracy_softmax_loss_layer.hpp" namespace caffe { template<typename Dtype> __global__ void accuracy_softmax_loss_forward_gpu(int threads,int area,int stage,const Dtype* data, const Dtype* label,Dtype* accur) { CUDA_KERNEL_LOOP(index,threads) { int batch_size=index/area; int offset=index%area; Dtype max=Dtype(0.); int k=0; int base=batch_size*area*stage; for(int i=0;i<stage;i++) { int pos=base+i*area+offset; if(max<data[pos]) { max=data[pos]; k=i; } } int s=label[index]>=stage?0:label[index]; accur[index]=Dtype(0.); if(k==s)accur[index]++; } } template<typename Dtype> void AccuracySoftMaxLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) { const Dtype* data=bottom[0]->gpu_data(); const Dtype* label=bottom[1]->gpu_data(); int stage=bottom[0]->shape(1); int area=bottom[0]->count(2); int count=bottom[1]->count(); Dtype* temp=NULL; cudaError_t state=cudaMalloc((void**)&temp,sizeof(Dtype)*count); if(state!=cudaSuccess)LOG(FATAL)<<"cudaMalloc memory errror."; accuracy_softmax_loss_forward_gpu<<<CAFFE_GET_BLOCKS(count),CAFFE_CUDA_NUM_THREADS>>>(count, area,stage,data,label,temp); Dtype loss=Dtype(0.); caffe_gpu_asum(count,temp,&loss); top[0]->mutable_cpu_data()[0]=loss/count; cudaFree(temp); } template<typename Dtype> void AccuracySoftMaxLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,const vector<bool>& propagete_down, const vector<Blob<Dtype>*>& bottom ) { //do-nothing } INSTANTIATE_LAYER_GPU_FUNCS(AccuracySoftMaxLossLayer); }
f6c97f6fe38ad7df718f7609abffe763cad198a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" #define MAX_K 4 #define MAX_BATCH_SIZE 32 #define MAX_N 12 Tensor FFModel::aggregate_spec(const Tensor* inputs, /* gate_preds, gate_assign, full_gate_pred, n * exp_pred */ int n, float lambda_bal, const char* name) { AggregateSpec* aggr = new AggregateSpec(*this, inputs, n, lambda_bal, name); layers.push_back(aggr); return aggr->outputs[0]; } AggregateSpec::AggregateSpec(FFModel& model, const Tensor* _inputs, int _n, float _lambda_bal, const char* name) : Op(model, OP_AGG_SPEC, name, _n+4, _inputs), n(_n), lambda_bal(_lambda_bal), profiling(model.config.profiling) { // FIXME: For now, set upper limits Better: Do as follows, but memory is // assigned per block, so requires to check that // https://stackoverflow.com/questions/5531247/allocating-shared-memory/5531640#5531640 assert(n <= MAX_N && "Increase MAX_N in #define"); assert(inputs[0].adim[0] <= MAX_K && "Increase MAX_K in #define"); assert(inputs[0].adim[1] <= MAX_BATCH_SIZE && "Increase MAX_BATCH_SIZE in #define"); assert(n+4 == numInputs); assert(n > 0); assert(inputs[0].numDim == 2); assert(inputs[1].numDim == 2); assert(inputs[2].numDim == 2); assert(inputs[3].numDim == 2); for(int i = 0; i < inputs[0].numDim; i++) { assert(inputs[0].adim[i] == inputs[1].adim[i]); assert(inputs[0].adim[i] == inputs[2].adim[i]); } assert(inputs[0].adim[1] == inputs[3].adim[1]); assert(inputs[3].adim[0] == n); // expert inputs int num_dim = inputs[4].numDim; int out_dim = inputs[4].adim[0]; for(int i = 1; i < n; i++) { assert(inputs[i+4].numDim == num_dim); assert(inputs[i+4].adim[0] == out_dim); } // output outputs[0].numDim = num_dim; int k = inputs[0].adim[0]; for(int i = 0; i < num_dim-1; i++) outputs[0].adim[i] = inputs[4].adim[i]; outputs[0].adim[num_dim-1] = k*inputs[0].adim[num_dim-1]; numWeights = 0; } void AggregateSpec::create_weights(FFModel& model) { // Do nothing } void AggregateSpec::create_output_and_partition(FFModel& model) { // Retrieve the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); // Can only partition over the sample dim assert(part_rect.hi[0] == part_rect.lo[0]); int batch_size = inputs[0].adim[1]; int out_dim = inputs[4].adim[0]; int k = inputs[0].adim[0]; const int dims[2] = {k*batch_size, out_dim}; outputs[0] = model.create_tensor<2>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; // Compute partition bound for input for(int i = 0; i < n+4; i++) { Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { 
model.create_disjoint_partition<2>( inputs[i], (IndexSpaceT<2>)task_is, input_lps[i], input_grad_lps[i]); } } } OpMeta* AggregateSpec::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { AggregateSpec* agg = (AggregateSpec*) task->args; FFHandler handle = *((FFHandler*)task->local_args); AggregateSpecMeta* m = new AggregateSpecMeta(handle, agg->n); m->profiling = agg->profiling; return m; } void AggregateSpec::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGG_SPEC_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(AggregateSpec)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } __global__ void aggspec_forward_kernel(float** exp_preds, const int* exp_assign, float* output, int n, // num experts const int k, // num chosen experts int exp_samples, // max samples per expert const int batch_size, int out_dim) { __shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if(threadIdx.x == 0) { int expert_idx[MAX_N] = {0}; for(int i = 0; i < batch_size; i++) { for(int j = 0; j < k; j++) { // Get pointer to chosen expert predictions int expert = exp_assign[i*k+j]; if(expert_idx[expert] >= exp_samples) { // dropped sample chosen_exp_preds[i*k+j] = 0; continue; } chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_idx[expert]*out_dim; expert_idx[expert]++; } } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k*batch_size*out_dim) { if(chosen_exp_preds[i/out_dim] != 0) { output[i] = chosen_exp_preds[i/out_dim][i%out_dim]; } else { output[i] = 0.0f; } } } __device__ void aggspec_backward_kernel_gate(const float* output_grad, float* full_gate_grads, const int* expert_assign, const bool* cache_corr, const float* gate_pred, int* expert_bal, float lambda_bal, int batch_size, int k, int n, int out_dim) { __shared__ float gate_grad_sum[MAX_BATCH_SIZE]; // init gate_grad_sum to 0 CUDA_KERNEL_LOOP(i, batch_size) { gate_grad_sum[i] = 0.0f; } __syncthreads(); // get sum of expert errors /* NOTE: Errors just squared L2 norm of gradients. 
* batch_size because the expert gradients are /= batch_size and then it would be /= batch_size^2 here */ CUDA_KERNEL_LOOP(i, batch_size*k*out_dim) { if(cache_corr[i/(k*out_dim)]) { float res = output_grad[i] * output_grad[i] * batch_size; float* gate_grad_idx = full_gate_grads + (i/(out_dim*k))*n + expert_assign[(i/(out_dim*k))*k+(i/out_dim)%k]; atomicAdd(gate_grad_idx, res); atomicAdd(gate_grad_sum+i/(k*out_dim), res); } } // Compute gate gradients: // Assigned expert i, sample j: pred(i,j) - err_(i,j)/sum_l err(l,j) __syncthreads(); CUDA_KERNEL_LOOP(i, k*batch_size) { if(cache_corr[i/k]) { full_gate_grads[i/k*n + expert_assign[i]] /= gate_grad_sum[i/k]; full_gate_grads[i/k*n + expert_assign[i]] -= (1.0f - gate_pred[i]); } } // balance term __syncthreads(); CUDA_KERNEL_LOOP(i, n*batch_size) { full_gate_grads[i] += lambda_bal*expert_bal[i%n]; } __syncthreads(); // make 0 mean CUDA_KERNEL_LOOP(i, n*batch_size) { int start = (i/n)*n; float sub = -full_gate_grads[i]/n; for(int j = 0; j < n; j++) { atomicAdd(full_gate_grads+start+j, sub); } } } __device__ void aggspec_backward_kernel_exp(const float* output_grad, const float* gate_preds, float** exp_grads, int batch_size, int k, int out_dim) { // compute expert gradients CUDA_KERNEL_LOOP(i, k*out_dim*batch_size) { if (exp_grads[i/out_dim] != 0) { exp_grads[i/out_dim][i%out_dim] += gate_preds[i/out_dim] * output_grad[i]; } } } __global__ void aggspec_backward_kernel(float** exp_grads, const int* exp_assign, const int* true_exp_assign, const float* gating_net_preds, float* full_gating_grads, const float* output_grads, int n, // num experts int k, // num chosen experts int exp_samples, // max samples per expert float lambda_bal, int batch_size, int out_dim) { __shared__ float* chosen_exp_grads[MAX_K*MAX_BATCH_SIZE]; __shared__ int expert_bal[MAX_N]; __shared__ bool cache_corr[MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if(threadIdx.x == 0) { // init arrays for(int i = 0; i < n; i++) expert_bal[i] = 0; for(int i = 0; i < batch_size; i++) cache_corr[i] = true; // Get pointer to chosen expert grads and expert counts for(int i = 0; i < batch_size; i++) { for(int j = 0; j < k; j++) { int expert = true_exp_assign[k*i + j]; if(expert != exp_assign[k*i + j]) cache_corr[i] = false; if(expert_bal[expert] >= exp_samples) { // dropped sample chosen_exp_grads[i*k+j] = 0; expert_bal[expert]++; continue; } chosen_exp_grads[i*k+j] = exp_grads[expert] + expert_bal[expert]*out_dim; expert_bal[expert]++; } } } __syncthreads(); // NOTE: These 2 functions could execute independently in parallel // get expert gradients aggspec_backward_kernel_exp(output_grads, gating_net_preds, chosen_exp_grads, batch_size, k, out_dim); // get gating net gradients aggspec_backward_kernel_gate(output_grads, full_gating_grads, exp_assign, cache_corr, gating_net_preds, expert_bal, (lambda_bal*n)/batch_size, batch_size, k, n, out_dim); } void AggregateSpec::forward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { int n = ((AggregateSpec*)task->args)->n; assert((int)regions.size() == n+3); assert((int)task->regions.size() == n+3); const AggregateSpecMeta* m = *((AggregateSpecMeta**)task->local_args); // get gate_pred, gate_assign, output const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA); const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA); const AccessorWO<float, 2> acc_output(regions[n+2], FID_DATA); Rect<2> rect_gate_pred = runtime->get_index_space_domain( ctx, 
task->regions[0].region.get_index_space()); Rect<2> rect_gate_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<2> rect_output = runtime->get_index_space_domain( ctx, task->regions[n+2].region.get_index_space()); coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1; assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1); coord_t k = rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1; assert(k == rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1); assert(k*batch_size == rect_output.hi[1] - rect_output.lo[1] + 1); coord_t out_dim = rect_output.hi[0] - rect_output.lo[0] + 1; // get exp_preds float* exp_preds[n]; // get first exp_pred and row and out_dim Domain exp_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); exp_preds[0] = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1; assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); for(int i = 1; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[i+2].region.get_index_space()); exp_preds[i] = helperGetTensorPointerWO<float>( regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } hipStream_t stream; checkCUDA(get_legion_stream(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); // call forward kernel hipMemcpy(m->dev_region_ptrs, exp_preds, n*sizeof(float*), hipMemcpyHostToDevice); hipLaunchKernelGGL(( aggspec_forward_kernel), dim3(GET_BLOCKS(batch_size*k*out_dim)), dim3(min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim))), 0, stream, m->dev_region_ptrs, acc_gate_assign.ptr(rect_gate_assign), acc_output.ptr(rect_output), n, k, rows, batch_size, out_dim); } void AggregateSpec::backward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { const AggregateSpecMeta* m = *((AggregateSpecMeta**)task->local_args); int n = ((AggregateSpec*)task->args)->n; float lambda_bal = ((AggregateSpec*)task->args)->lambda_bal; assert((int)regions.size() == n+5); assert((int)task->regions.size() == n+5); // get gate_pred, gate_assin, full_gate_grad, output_grad const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA); const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA); const AccessorRO<int, 2> acc_true_gate_assign(regions[2], FID_DATA); const AccessorWO<float, 2> acc_full_gate_grad(regions[3], FID_DATA); const AccessorRO<float, 2> acc_output_grad(regions[n+4], FID_DATA); Rect<2> rect_gate_pred = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<2> rect_gate_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<2> rect_true_gate_assign = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<2> rect_full_gate_grad = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<2> rect_out_grad = runtime->get_index_space_domain( ctx, task->regions[n+4].region.get_index_space()); coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1; assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1); assert(rect_gate_assign == rect_true_gate_assign); assert(batch_size == rect_full_gate_grad.hi[1] - 
rect_full_gate_grad.lo[1] + 1); coord_t k = rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1; assert(k*batch_size == rect_out_grad.hi[1] - rect_out_grad.lo[1] + 1); assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1 == k); coord_t out_dim = rect_out_grad.hi[0] - rect_out_grad.lo[0] + 1; assert(n == rect_full_gate_grad.hi[0] - rect_full_gate_grad.lo[0] + 1); // get exp_preds float* exp_grads[n]; // get first exp_pred and row Domain exp_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); exp_grads[0] = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1; assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); for(int i = 1; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[i+4].region.get_index_space()); exp_grads[i] = helperGetTensorPointerRW<float>( regions[i+4], task->regions[i+4], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } hipStream_t stream; checkCUDA(get_legion_stream(&stream)); checkCUDA(hipblasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); // call backward kernel hipMemcpy(m->dev_region_ptrs, exp_grads, n*sizeof(float*), hipMemcpyHostToDevice); hipLaunchKernelGGL(( aggspec_backward_kernel), dim3(GET_BLOCKS(batch_size*k*out_dim)), dim3(min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim))), 0, stream, m->dev_region_ptrs, acc_gate_assign.ptr(rect_gate_assign), acc_true_gate_assign.ptr(rect_true_gate_assign), acc_gate_pred.ptr(rect_gate_pred), acc_full_gate_grad.ptr(rect_full_gate_grad), acc_output_grad.ptr(rect_out_grad), n, k, rows, lambda_bal, batch_size, out_dim); } void AggregateSpec::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGG_SPEC_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(AggregateSpec)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // gate_preds launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); // exp_preds for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region)); launcher.add_field(i+2, FID_DATA); } // output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(n+2, FID_DATA); runtime->execute_index_space(ctx, launcher); } void AggregateSpec::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch 
(domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGG_SPEC_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(AggregateSpec)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // gate_preds launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); // true gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[2], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[2].region)); launcher.add_field(2, FID_DATA); // gate gradients full launcher.add_region_requirement( RegionRequirement(input_grad_lps[3], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[3].region_grad)); launcher.add_field(3, FID_DATA); // exp gradients for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region_grad)); launcher.add_field(i+4, FID_DATA); } // output launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(n+4, FID_DATA); runtime->execute_index_space(ctx, launcher); } AggregateSpecMeta::AggregateSpecMeta(FFHandler handler, int n) : OpMeta(handler) { checkCUDA(hipMalloc(&dev_region_ptrs, n*sizeof(float*))); } AggregateSpecMeta::~AggregateSpecMeta(void) { checkCUDA(hipFree(&dev_region_ptrs)); } bool AggregateSpec::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { //TODO: implement cost_metrics.forward_time = 0.0f; cost_metrics.backward_time = 0.0f; cost_metrics.memory_requirement = 0; return false; }
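A short CPU sketch of the gather performed by aggspec_forward_kernel above: sample i's j-th assigned expert contributes its next unclaimed prediction row to output row i*k+j, and assignments beyond an expert's exp_samples capacity are dropped and left at zero. The sizes and values below are invented and the Legion task plumbing is omitted.

#include <cstdio>
#include <vector>

int main()
{
    const int n = 2, k = 1, batch = 3, out_dim = 2, exp_samples = 2;
    // Expert predictions, row-major [exp_samples x out_dim] per expert (toy data).
    std::vector<std::vector<float>> exp_preds = {{1, 1, 2, 2}, {3, 3, 4, 4}};
    int exp_assign[batch * k] = {0, 1, 0};                 // chosen expert per sample

    std::vector<float> output(k * batch * out_dim, 0.0f);
    std::vector<int> used(n, 0);                           // rows consumed per expert
    for (int i = 0; i < batch; ++i)
        for (int j = 0; j < k; ++j) {
            int e = exp_assign[i * k + j];
            if (used[e] >= exp_samples) continue;          // dropped sample stays zero
            for (int d = 0; d < out_dim; ++d)
                output[(i * k + j) * out_dim + d] = exp_preds[e][used[e] * out_dim + d];
            ++used[e];
        }
    for (int r = 0; r < k * batch; ++r)
        std::printf("out[%d] = %.0f %.0f\n", r, output[r * out_dim], output[r * out_dim + 1]);
    return 0;
}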
f6c97f6fe38ad7df718f7609abffe763cad198a8.cu
/* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" #define MAX_K 4 #define MAX_BATCH_SIZE 32 #define MAX_N 12 Tensor FFModel::aggregate_spec(const Tensor* inputs, /* gate_preds, gate_assign, full_gate_pred, n * exp_pred */ int n, float lambda_bal, const char* name) { AggregateSpec* aggr = new AggregateSpec(*this, inputs, n, lambda_bal, name); layers.push_back(aggr); return aggr->outputs[0]; } AggregateSpec::AggregateSpec(FFModel& model, const Tensor* _inputs, int _n, float _lambda_bal, const char* name) : Op(model, OP_AGG_SPEC, name, _n+4, _inputs), n(_n), lambda_bal(_lambda_bal), profiling(model.config.profiling) { // FIXME: For now, set upper limits Better: Do as follows, but memory is // assigned per block, so requires to check that // https://stackoverflow.com/questions/5531247/allocating-shared-memory/5531640#5531640 assert(n <= MAX_N && "Increase MAX_N in #define"); assert(inputs[0].adim[0] <= MAX_K && "Increase MAX_K in #define"); assert(inputs[0].adim[1] <= MAX_BATCH_SIZE && "Increase MAX_BATCH_SIZE in #define"); assert(n+4 == numInputs); assert(n > 0); assert(inputs[0].numDim == 2); assert(inputs[1].numDim == 2); assert(inputs[2].numDim == 2); assert(inputs[3].numDim == 2); for(int i = 0; i < inputs[0].numDim; i++) { assert(inputs[0].adim[i] == inputs[1].adim[i]); assert(inputs[0].adim[i] == inputs[2].adim[i]); } assert(inputs[0].adim[1] == inputs[3].adim[1]); assert(inputs[3].adim[0] == n); // expert inputs int num_dim = inputs[4].numDim; int out_dim = inputs[4].adim[0]; for(int i = 1; i < n; i++) { assert(inputs[i+4].numDim == num_dim); assert(inputs[i+4].adim[0] == out_dim); } // output outputs[0].numDim = num_dim; int k = inputs[0].adim[0]; for(int i = 0; i < num_dim-1; i++) outputs[0].adim[i] = inputs[4].adim[i]; outputs[0].adim[num_dim-1] = k*inputs[0].adim[num_dim-1]; numWeights = 0; } void AggregateSpec::create_weights(FFModel& model) { // Do nothing } void AggregateSpec::create_output_and_partition(FFModel& model) { // Retrieve the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); // Can only partition over the sample dim assert(part_rect.hi[0] == part_rect.lo[0]); int batch_size = inputs[0].adim[1]; int out_dim = inputs[4].adim[0]; int k = inputs[0].adim[0]; const int dims[2] = {k*batch_size, out_dim}; outputs[0] = model.create_tensor<2>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; // Compute partition bound for input for(int i = 0; i < n+4; i++) { Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { model.create_disjoint_partition<2>( inputs[i], (IndexSpaceT<2>)task_is, input_lps[i], input_grad_lps[i]); } } } 
OpMeta* AggregateSpec::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { AggregateSpec* agg = (AggregateSpec*) task->args; FFHandler handle = *((FFHandler*)task->local_args); AggregateSpecMeta* m = new AggregateSpecMeta(handle, agg->n); m->profiling = agg->profiling; return m; } void AggregateSpec::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ ParallelConfig pc; \ std::string pcname = name; \ ff.config.find_parallel_config(DIM, pcname, pc); \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGG_SPEC_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(AggregateSpec)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } __global__ void aggspec_forward_kernel(float** exp_preds, const int* exp_assign, float* output, int n, // num experts const int k, // num chosen experts int exp_samples, // max samples per expert const int batch_size, int out_dim) { __shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if(threadIdx.x == 0) { int expert_idx[MAX_N] = {0}; for(int i = 0; i < batch_size; i++) { for(int j = 0; j < k; j++) { // Get pointer to chosen expert predictions int expert = exp_assign[i*k+j]; if(expert_idx[expert] >= exp_samples) { // dropped sample chosen_exp_preds[i*k+j] = 0; continue; } chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_idx[expert]*out_dim; expert_idx[expert]++; } } } __syncthreads(); // compute output CUDA_KERNEL_LOOP(i, k*batch_size*out_dim) { if(chosen_exp_preds[i/out_dim] != 0) { output[i] = chosen_exp_preds[i/out_dim][i%out_dim]; } else { output[i] = 0.0f; } } } __device__ void aggspec_backward_kernel_gate(const float* output_grad, float* full_gate_grads, const int* expert_assign, const bool* cache_corr, const float* gate_pred, int* expert_bal, float lambda_bal, int batch_size, int k, int n, int out_dim) { __shared__ float gate_grad_sum[MAX_BATCH_SIZE]; // init gate_grad_sum to 0 CUDA_KERNEL_LOOP(i, batch_size) { gate_grad_sum[i] = 0.0f; } __syncthreads(); // get sum of expert errors /* NOTE: Errors just squared L2 norm of gradients. 
* batch_size because the expert gradients are /= batch_size and then it would be /= batch_size^2 here */ CUDA_KERNEL_LOOP(i, batch_size*k*out_dim) { if(cache_corr[i/(k*out_dim)]) { float res = output_grad[i] * output_grad[i] * batch_size; float* gate_grad_idx = full_gate_grads + (i/(out_dim*k))*n + expert_assign[(i/(out_dim*k))*k+(i/out_dim)%k]; atomicAdd(gate_grad_idx, res); atomicAdd(gate_grad_sum+i/(k*out_dim), res); } } // Compute gate gradients: // Assigned expert i, sample j: pred(i,j) - err_(i,j)/sum_l err(l,j) __syncthreads(); CUDA_KERNEL_LOOP(i, k*batch_size) { if(cache_corr[i/k]) { full_gate_grads[i/k*n + expert_assign[i]] /= gate_grad_sum[i/k]; full_gate_grads[i/k*n + expert_assign[i]] -= (1.0f - gate_pred[i]); } } // balance term __syncthreads(); CUDA_KERNEL_LOOP(i, n*batch_size) { full_gate_grads[i] += lambda_bal*expert_bal[i%n]; } __syncthreads(); // make 0 mean CUDA_KERNEL_LOOP(i, n*batch_size) { int start = (i/n)*n; float sub = -full_gate_grads[i]/n; for(int j = 0; j < n; j++) { atomicAdd(full_gate_grads+start+j, sub); } } } __device__ void aggspec_backward_kernel_exp(const float* output_grad, const float* gate_preds, float** exp_grads, int batch_size, int k, int out_dim) { // compute expert gradients CUDA_KERNEL_LOOP(i, k*out_dim*batch_size) { if (exp_grads[i/out_dim] != 0) { exp_grads[i/out_dim][i%out_dim] += gate_preds[i/out_dim] * output_grad[i]; } } } __global__ void aggspec_backward_kernel(float** exp_grads, const int* exp_assign, const int* true_exp_assign, const float* gating_net_preds, float* full_gating_grads, const float* output_grads, int n, // num experts int k, // num chosen experts int exp_samples, // max samples per expert float lambda_bal, int batch_size, int out_dim) { __shared__ float* chosen_exp_grads[MAX_K*MAX_BATCH_SIZE]; __shared__ int expert_bal[MAX_N]; __shared__ bool cache_corr[MAX_BATCH_SIZE]; // Get pred pointers, single thread per block if(threadIdx.x == 0) { // init arrays for(int i = 0; i < n; i++) expert_bal[i] = 0; for(int i = 0; i < batch_size; i++) cache_corr[i] = true; // Get pointer to chosen expert grads and expert counts for(int i = 0; i < batch_size; i++) { for(int j = 0; j < k; j++) { int expert = true_exp_assign[k*i + j]; if(expert != exp_assign[k*i + j]) cache_corr[i] = false; if(expert_bal[expert] >= exp_samples) { // dropped sample chosen_exp_grads[i*k+j] = 0; expert_bal[expert]++; continue; } chosen_exp_grads[i*k+j] = exp_grads[expert] + expert_bal[expert]*out_dim; expert_bal[expert]++; } } } __syncthreads(); // NOTE: These 2 functions could execute independently in parallel // get expert gradients aggspec_backward_kernel_exp(output_grads, gating_net_preds, chosen_exp_grads, batch_size, k, out_dim); // get gating net gradients aggspec_backward_kernel_gate(output_grads, full_gating_grads, exp_assign, cache_corr, gating_net_preds, expert_bal, (lambda_bal*n)/batch_size, batch_size, k, n, out_dim); } void AggregateSpec::forward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { int n = ((AggregateSpec*)task->args)->n; assert((int)regions.size() == n+3); assert((int)task->regions.size() == n+3); const AggregateSpecMeta* m = *((AggregateSpecMeta**)task->local_args); // get gate_pred, gate_assign, output const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA); const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA); const AccessorWO<float, 2> acc_output(regions[n+2], FID_DATA); Rect<2> rect_gate_pred = runtime->get_index_space_domain( ctx, 
task->regions[0].region.get_index_space()); Rect<2> rect_gate_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<2> rect_output = runtime->get_index_space_domain( ctx, task->regions[n+2].region.get_index_space()); coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1; assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1); coord_t k = rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1; assert(k == rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1); assert(k*batch_size == rect_output.hi[1] - rect_output.lo[1] + 1); coord_t out_dim = rect_output.hi[0] - rect_output.lo[0] + 1; // get exp_preds float* exp_preds[n]; // get first exp_pred and row and out_dim Domain exp_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); exp_preds[0] = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1; assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); for(int i = 1; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[i+2].region.get_index_space()); exp_preds[i] = helperGetTensorPointerWO<float>( regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); // call forward kernel cudaMemcpy(m->dev_region_ptrs, exp_preds, n*sizeof(float*), cudaMemcpyHostToDevice); aggspec_forward_kernel<<<GET_BLOCKS(batch_size*k*out_dim), min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim)), 0, stream>>>(m->dev_region_ptrs, acc_gate_assign.ptr(rect_gate_assign), acc_output.ptr(rect_output), n, k, rows, batch_size, out_dim); } void AggregateSpec::backward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { const AggregateSpecMeta* m = *((AggregateSpecMeta**)task->local_args); int n = ((AggregateSpec*)task->args)->n; float lambda_bal = ((AggregateSpec*)task->args)->lambda_bal; assert((int)regions.size() == n+5); assert((int)task->regions.size() == n+5); // get gate_pred, gate_assin, full_gate_grad, output_grad const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA); const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA); const AccessorRO<int, 2> acc_true_gate_assign(regions[2], FID_DATA); const AccessorWO<float, 2> acc_full_gate_grad(regions[3], FID_DATA); const AccessorRO<float, 2> acc_output_grad(regions[n+4], FID_DATA); Rect<2> rect_gate_pred = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Rect<2> rect_gate_assign = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Rect<2> rect_true_gate_assign = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Rect<2> rect_full_gate_grad = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Rect<2> rect_out_grad = runtime->get_index_space_domain( ctx, task->regions[n+4].region.get_index_space()); coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1; assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1); assert(rect_gate_assign == rect_true_gate_assign); assert(batch_size == rect_full_gate_grad.hi[1] - rect_full_gate_grad.lo[1] + 1); coord_t k 
= rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1; assert(k*batch_size == rect_out_grad.hi[1] - rect_out_grad.lo[1] + 1); assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1 == k); coord_t out_dim = rect_out_grad.hi[0] - rect_out_grad.lo[0] + 1; assert(n == rect_full_gate_grad.hi[0] - rect_full_gate_grad.lo[0] + 1); // get exp_preds float* exp_grads[n]; // get first exp_pred and row Domain exp_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); exp_grads[0] = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1; assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); for(int i = 1; i < n; i++) { exp_domain = runtime->get_index_space_domain( ctx, task->regions[i+4].region.get_index_space()); exp_grads[i] = helperGetTensorPointerRW<float>( regions[i+4], task->regions[i+4], FID_DATA, ctx, runtime); assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1); assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1); } cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); // call backward kernel cudaMemcpy(m->dev_region_ptrs, exp_grads, n*sizeof(float*), cudaMemcpyHostToDevice); aggspec_backward_kernel<<<GET_BLOCKS(batch_size*k*out_dim), min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim)), 0, stream>>>( m->dev_region_ptrs, acc_gate_assign.ptr(rect_gate_assign), acc_true_gate_assign.ptr(rect_true_gate_assign), acc_gate_pred.ptr(rect_gate_pred), acc_full_gate_grad.ptr(rect_full_gate_grad), acc_output_grad.ptr(rect_out_grad), n, k, rows, lambda_bal, batch_size, out_dim); } void AggregateSpec::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGG_SPEC_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(AggregateSpec)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // gate_preds launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); // exp_preds for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region)); launcher.add_field(i+2, FID_DATA); } // output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(n+2, FID_DATA); runtime->execute_index_space(ctx, launcher); } void AggregateSpec::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> 
rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(AGG_SPEC_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(AggregateSpec)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // gate_preds launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); // true gate_assign launcher.add_region_requirement( RegionRequirement(input_lps[2], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[2].region)); launcher.add_field(2, FID_DATA); // gate gradients full launcher.add_region_requirement( RegionRequirement(input_grad_lps[3], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[3].region_grad)); launcher.add_field(3, FID_DATA); // exp gradients for(int i = 0; i < n; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i+4], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i+4].region_grad)); launcher.add_field(i+4, FID_DATA); } // output launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(n+4, FID_DATA); runtime->execute_index_space(ctx, launcher); } AggregateSpecMeta::AggregateSpecMeta(FFHandler handler, int n) : OpMeta(handler) { checkCUDA(cudaMalloc(&dev_region_ptrs, n*sizeof(float*))); } AggregateSpecMeta::~AggregateSpecMeta(void) { checkCUDA(cudaFree(&dev_region_ptrs)); } bool AggregateSpec::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { //TODO: implement cost_metrics.forward_time = 0.0f; cost_metrics.backward_time = 0.0f; cost_metrics.memory_requirement = 0; return false; }
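The aggregate_spec pair above illustrates what hipify changes and what it keeps: the triple-chevron launches become hipLaunchKernelGGL, cudaMemcpy / cudaStream_t / cudaMalloc become their hip-prefixed counterparts, and the kernel bodies are left untouched. The sketch below is a simplified, self-contained version of the pointer-table pattern used by forward_task: an array of per-expert device pointers is staged on the host, copied into device memory, and a single kernel indexes through it. It is not the actual FlexFlow kernel (which also tracks per-expert row offsets and dropped samples); all names and sizes here are made up for illustration.

#include <cuda_runtime.h>
#include <cstdio>

// Toy stand-in for aggspec_forward_kernel: each output row is copied from the
// expert buffer selected for that row. Simplified on purpose: it indexes every
// expert buffer by the global row instead of keeping per-expert running offsets.
__global__ void gather_rows(float** expert_out, const int* assign,
                            float* out, int rows, int dim)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < rows * dim) {
        int row = i / dim, col = i % dim;
        out[i] = expert_out[assign[row]][row * dim + col];
    }
}

int main()
{
    const int n_experts = 2, rows = 4, dim = 8;

    // Per-expert buffers, zero-initialized for the demo.
    float* h_ptrs[n_experts];
    for (int e = 0; e < n_experts; ++e) {
        cudaMalloc(&h_ptrs[e], rows * dim * sizeof(float));
        cudaMemset(h_ptrs[e], 0, rows * dim * sizeof(float));
    }

    // Stage the pointer table on the device, as forward_task does with
    // m->dev_region_ptrs before launching the kernel.
    float** d_ptrs;
    cudaMalloc(&d_ptrs, n_experts * sizeof(float*));
    cudaMemcpy(d_ptrs, h_ptrs, n_experts * sizeof(float*), cudaMemcpyHostToDevice);

    int h_assign[rows] = {0, 1, 0, 1};
    int* d_assign;
    float* d_out;
    cudaMalloc(&d_assign, sizeof(h_assign));
    cudaMalloc(&d_out, rows * dim * sizeof(float));
    cudaMemcpy(d_assign, h_assign, sizeof(h_assign), cudaMemcpyHostToDevice);

    gather_rows<<<(rows * dim + 127) / 128, 128>>>(d_ptrs, d_assign, d_out, rows, dim);
    cudaDeviceSynchronize();
    printf("gather_rows status: %s\n", cudaGetErrorString(cudaGetLastError()));

    for (int e = 0; e < n_experts; ++e) cudaFree(h_ptrs[e]);
    cudaFree(d_ptrs);
    cudaFree(d_assign);
    cudaFree(d_out);
    return 0;
}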
d23c99e644c9d3cb735024a3926918f410a7e8e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "recfilter.h" const int WS = 32; #define ORDER 1 #include "alg5.cu" #undef ORDER #define ORDER 2 #include "alg5.cu" namespace rod { struct recfilter5_plan { recfilter5_plan() // at least these should be initialized to make // upload_plan work when plan is empty : a_in(NULL) , width(0) , height(0) , border(0) { } virtual ~recfilter5_plan() { if(a_in != NULL) hipFreeArray(a_in); } int width, height; int rowstride; float inv_width, inv_height; int m_size, n_size, last_m, last_n; BorderType border_type; int border; hipArray *a_in; }; template <int R> struct recfilter5_plan_R : recfilter5_plan { recfilter5_plan_R() { // this should be initialized for upload_plan for(int i=0; i<R; ++i) weights[i] = 0; } dvector<Matrix<float,R,WS> > d_pybar, d_ezhat, d_ptucheck, d_etvtilde; Vector<float, R+1> weights; Matrix<float,R,WS> AFP_T, ARE_T; Matrix<float,WS,WS> AFB_T, ARB_T; Matrix<float,R,R> AbF_T, AbR_T, AbF, AbR, HARB_AFP_T, HARB_AFP; Matrix<float,R,WS> ARB_AFP_T, TAFB, HARB_AFB; }; namespace { const recfilter5_plan *g_loaded_plan_in_gpu = NULL; template<int R> void load_plan(const recfilter5_plan_R<R> &plan) { const recfilter5_plan_R<R> *gpu_plan = dynamic_cast<const recfilter5_plan_R<R> *>(g_loaded_plan_in_gpu); const_data<R> &cdata = get_cdata<R>::get(); if(!gpu_plan || gpu_plan->weights != plan.weights) { copy_to_symbol(cdata.weights, plan.weights); copy_to_symbol(cdata.AbF_T, plan.AbF_T); copy_to_symbol(cdata.AbR_T, plan.AbR_T); copy_to_symbol(cdata.HARB_AFP_T, plan.HARB_AFP_T); copy_to_symbol(cdata.AbF, plan.AbF); copy_to_symbol(cdata.AbR, plan.AbR); copy_to_symbol(cdata.HARB_AFP, plan.HARB_AFP); copy_to_symbol(cdata.ARE_T, plan.ARE_T); copy_to_symbol(cdata.ARB_AFP_T, plan.ARB_AFP_T); copy_to_symbol(cdata.TAFB, plan.TAFB); copy_to_symbol(cdata.HARB_AFB, plan.HARB_AFB); } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->border != plan.border) copy_to_symbol(cdata.border,plan.border); if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->rowstride!=plan.rowstride) copy_to_symbol(cdata.rowstride, plan.rowstride); if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->width != plan.width || g_loaded_plan_in_gpu->border != plan.border) { copy_to_symbol(cdata.width, plan.width); copy_to_symbol(cdata.inv_width, plan.inv_width); copy_to_symbol(cdata.m_size, plan.m_size); copy_to_symbol(cdata.last_m, plan.last_m); } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->height != plan.height || g_loaded_plan_in_gpu->border != plan.border) { copy_to_symbol(cdata.inv_height, plan.inv_height); copy_to_symbol(cdata.height, plan.height); copy_to_symbol(cdata.n_size, plan.n_size); copy_to_symbol(cdata.last_n, plan.last_n); } if(!g_loaded_plan_in_gpu) { t_in.normalized = true; t_in.filterMode = hipFilterModePoint; } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->border_type != plan.border_type) { switch(plan.border_type) { case CLAMP_TO_ZERO: t_in.addressMode[0] = t_in.addressMode[1] = hipAddressModeBorder; break; case CLAMP_TO_EDGE: t_in.addressMode[0] = t_in.addressMode[1] = hipAddressModeClamp; break; case REPEAT: t_in.addressMode[0] = t_in.addressMode[1] = hipAddressModeWrap; break; case REFLECT: t_in.addressMode[0] = t_in.addressMode[1] = hipAddressModeMirror; break; } } g_loaded_plan_in_gpu = &plan; } } // local namespace template <int R> void recfilter5(recfilter5_plan_R<R> &plan,float *d_output,const float *d_input) { load_plan(plan); hipMemcpy2DToArray(plan.a_in, 0, 0, d_input, plan.rowstride*sizeof(float), 
plan.width*sizeof(float), plan.height, hipMemcpyDeviceToDevice); hipBindTextureToArray(t_in, plan.a_in); hipLaunchKernelGGL(( collect_carries), #if CUDA_SM >= 20 dim3((plan.m_size+2-1)/2, plan.n_size), #else dim3(plan.m_size, plan.n_size), #endif dim3(WS, W1) , 0, &plan.d_pybar, &plan.d_ezhat, &plan.d_ptucheck, &plan.d_etvtilde); hipLaunchKernelGGL(( adjust_carries), dim3(dim3(1,plan.n_size)), dim3(dim3(WS, std::min<int>(plan.m_size, W23))) , 0, 0, &plan.d_pybar, &plan.d_ezhat, plan.m_size, plan.n_size ); hipLaunchKernelGGL(( adjust_carries), dim3(dim3(plan.m_size,1)), dim3(dim3(WS, std::min<int>(plan.n_size, W45))) , 0, 0, &plan.d_ptucheck, &plan.d_etvtilde, &plan.d_pybar, &plan.d_ezhat, plan.m_size, plan.n_size ); hipLaunchKernelGGL(( write_result), #if CUDA_SM >= 20 dim3((plan.m_size+2-1)/2,plan.n_size), #else dim3(plan.m_size,plan.n_size), #endif dim3(WS, W6), 0, d_output, &plan.d_pybar, &plan.d_ezhat, &plan.d_ptucheck, &plan.d_etvtilde); hipUnbindTexture(t_in); } void recfilter5(recfilter5_plan *plan, float *d_output, const float *d_input) { assert(plan); if(recfilter5_plan_R<1> *plan_R = dynamic_cast<recfilter5_plan_R<1>*>(plan)) recfilter5(*plan_R, d_output, d_input); else if(recfilter5_plan_R<2> *plan_R = dynamic_cast<recfilter5_plan_R<2>*>(plan)) recfilter5(*plan_R, d_output, d_input); else throw std::runtime_error("Bad plan for recfilter5"); } void recfilter5(recfilter5_plan *plan, float *d_inout) { recfilter5(plan, d_inout, d_inout); } template <int R> recfilter5_plan * recfilter5_create_plan(int width, int height, int rowstride, const Vector<float, R+1> &w, BorderType border_type, int border) { recfilter5_plan_R<R> *plan = new recfilter5_plan_R<R>; try { update_plan<R>(plan, width, height, rowstride, w, border_type, border); load_plan(*plan); } catch(...) { delete plan; throw; } return plan; } void calc_borders(int *left, int *top, int *right, int *bottom, int w, int h, int border) { if(border > 0) { *left = border*32; *top = border*32; *right = (border+1)*32-(w%32); *bottom = (border+1)*32-(h%32); } else { *left = *top = 0; *right = 32-(w%32); if(*right == 32) *right = 0; *bottom = 32-(h%32); if(*bottom == 32) *bottom = 0; } } template <int R> void update_plan(recfilter5_plan *_plan, int width, int height, int rowstride, const Vector<float, R+1> &w, BorderType border_type, int border) { assert(_plan); recfilter5_plan_R<R> *plan = dynamic_cast<recfilter5_plan_R<R> *>(_plan); if(plan == NULL) throw std::invalid_argument("Can't change recfilter's plan order"); const int B = 32; int old_border = plan->border, old_width = plan->width, old_height = plan->height; if(old_width!=width || old_height!=height) { // let's do this first to at least have a passable strong // exception guarantee (this has more chance to blow up) hipArray *a_in = NULL; hipChannelFormatDesc ccd = hipCreateChannelDesc<float>(); hipMallocArray(&a_in, &ccd, width, height); check_cuda_error("hipMallocArray"); try { if(plan->a_in) { hipFreeArray(plan->a_in); check_cuda_error("hipFreeArray"); plan->a_in = NULL; } } catch(...) 
{ hipFreeArray(a_in); throw; } plan->a_in = a_in; } if(plan->weights != w) { Matrix<float,R,R> Ir = identity<float,R,R>(); Matrix<float,B,R> Zbr = zeros<float,B,R>(); Matrix<float,R,B> Zrb = zeros<float,R,B>(); Matrix<float,B,B> Ib = identity<float,B,B>(); // depends on weight plan->weights = w; plan->AFP_T = fwd(Ir, Zrb, w); plan->ARE_T = rev(Zrb, Ir, w); plan->AFB_T = fwd(Zbr, Ib, w); plan->ARB_T = rev(Ib, Zbr, w); plan->AbF_T = tail<R>(plan->AFP_T); plan->AbR_T = head<R>(plan->ARE_T); plan->AbF = transp(plan->AbF_T); plan->AbR = transp(plan->AbR_T); plan->HARB_AFP_T = plan->AFP_T*head<R>(plan->ARB_T); plan->HARB_AFP = transp(plan->HARB_AFP_T); plan->ARB_AFP_T = plan->AFP_T*plan->ARB_T; plan->TAFB = transp(tail<R>(plan->AFB_T)); plan->HARB_AFB = transp(plan->AFB_T*head<R>(plan->ARB_T)); } int bleft, bright, btop, bbottom; calc_borders(&bleft, &btop, &bright, &bbottom, width, height, border); // depends on width and border if(old_border != border || old_width != width) { plan->m_size = (width+bleft+bright+WS-1)/WS, plan->last_m = (bleft+width-1)/WS; plan->width = width; plan->inv_width = 1.f/width; } // depends on height and border if(old_border != border || old_height != height) { plan->n_size = (height+btop+bbottom+WS-1)/WS; plan->last_n = (btop+height-1)/WS; plan->height = height; plan->inv_height = 1.f/height; } // depends on width, height and border if(old_border!=border || old_width!=width || old_height!=height) { // TODO: provide strong exception guarantee of previous data // in case of any of these blowing up. plan->d_pybar.resize(plan->n_size*plan->m_size); plan->d_ezhat.resize(plan->n_size*plan->m_size); plan->d_ptucheck.resize(plan->n_size*plan->m_size); plan->d_etvtilde.resize(plan->n_size*plan->m_size); } // depends on rowstride plan->rowstride = rowstride; // depends on border plan->border_type = border_type; plan->border = border; } template recfilter5_plan * recfilter5_create_plan<1>(int width, int height, int rowstride, const Vector<float, 1+1> &w, BorderType border_type, int border); template recfilter5_plan * recfilter5_create_plan<2>(int width, int height, int rowstride, const Vector<float, 2+1> &w, BorderType border_type, int border); void free(recfilter5_plan *plan) { if(g_loaded_plan_in_gpu == plan) g_loaded_plan_in_gpu = NULL; delete plan; } } // namespace rod
d23c99e644c9d3cb735024a3926918f410a7e8e0.cu
#include "recfilter.h" const int WS = 32; #define ORDER 1 #include "alg5.cu" #undef ORDER #define ORDER 2 #include "alg5.cu" namespace rod { struct recfilter5_plan { recfilter5_plan() // at least these should be initialized to make // upload_plan work when plan is empty : a_in(NULL) , width(0) , height(0) , border(0) { } virtual ~recfilter5_plan() { if(a_in != NULL) cudaFreeArray(a_in); } int width, height; int rowstride; float inv_width, inv_height; int m_size, n_size, last_m, last_n; BorderType border_type; int border; cudaArray *a_in; }; template <int R> struct recfilter5_plan_R : recfilter5_plan { recfilter5_plan_R() { // this should be initialized for upload_plan for(int i=0; i<R; ++i) weights[i] = 0; } dvector<Matrix<float,R,WS> > d_pybar, d_ezhat, d_ptucheck, d_etvtilde; Vector<float, R+1> weights; Matrix<float,R,WS> AFP_T, ARE_T; Matrix<float,WS,WS> AFB_T, ARB_T; Matrix<float,R,R> AbF_T, AbR_T, AbF, AbR, HARB_AFP_T, HARB_AFP; Matrix<float,R,WS> ARB_AFP_T, TAFB, HARB_AFB; }; namespace { const recfilter5_plan *g_loaded_plan_in_gpu = NULL; template<int R> void load_plan(const recfilter5_plan_R<R> &plan) { const recfilter5_plan_R<R> *gpu_plan = dynamic_cast<const recfilter5_plan_R<R> *>(g_loaded_plan_in_gpu); const_data<R> &cdata = get_cdata<R>::get(); if(!gpu_plan || gpu_plan->weights != plan.weights) { copy_to_symbol(cdata.weights, plan.weights); copy_to_symbol(cdata.AbF_T, plan.AbF_T); copy_to_symbol(cdata.AbR_T, plan.AbR_T); copy_to_symbol(cdata.HARB_AFP_T, plan.HARB_AFP_T); copy_to_symbol(cdata.AbF, plan.AbF); copy_to_symbol(cdata.AbR, plan.AbR); copy_to_symbol(cdata.HARB_AFP, plan.HARB_AFP); copy_to_symbol(cdata.ARE_T, plan.ARE_T); copy_to_symbol(cdata.ARB_AFP_T, plan.ARB_AFP_T); copy_to_symbol(cdata.TAFB, plan.TAFB); copy_to_symbol(cdata.HARB_AFB, plan.HARB_AFB); } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->border != plan.border) copy_to_symbol(cdata.border,plan.border); if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->rowstride!=plan.rowstride) copy_to_symbol(cdata.rowstride, plan.rowstride); if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->width != plan.width || g_loaded_plan_in_gpu->border != plan.border) { copy_to_symbol(cdata.width, plan.width); copy_to_symbol(cdata.inv_width, plan.inv_width); copy_to_symbol(cdata.m_size, plan.m_size); copy_to_symbol(cdata.last_m, plan.last_m); } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->height != plan.height || g_loaded_plan_in_gpu->border != plan.border) { copy_to_symbol(cdata.inv_height, plan.inv_height); copy_to_symbol(cdata.height, plan.height); copy_to_symbol(cdata.n_size, plan.n_size); copy_to_symbol(cdata.last_n, plan.last_n); } if(!g_loaded_plan_in_gpu) { t_in.normalized = true; t_in.filterMode = cudaFilterModePoint; } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->border_type != plan.border_type) { switch(plan.border_type) { case CLAMP_TO_ZERO: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeBorder; break; case CLAMP_TO_EDGE: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeClamp; break; case REPEAT: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeWrap; break; case REFLECT: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeMirror; break; } } g_loaded_plan_in_gpu = &plan; } } // local namespace template <int R> void recfilter5(recfilter5_plan_R<R> &plan,float *d_output,const float *d_input) { load_plan(plan); cudaMemcpy2DToArray(plan.a_in, 0, 0, d_input, plan.rowstride*sizeof(float), plan.width*sizeof(float), plan.height, cudaMemcpyDeviceToDevice); 
cudaBindTextureToArray(t_in, plan.a_in); collect_carries<<< #if CUDA_SM >= 20 dim3((plan.m_size+2-1)/2, plan.n_size), #else dim3(plan.m_size, plan.n_size), #endif dim3(WS, W1) >>> (&plan.d_pybar, &plan.d_ezhat, &plan.d_ptucheck, &plan.d_etvtilde); adjust_carries<<< dim3(1,plan.n_size), dim3(WS, std::min<int>(plan.m_size, W23)) >>> (&plan.d_pybar, &plan.d_ezhat, plan.m_size, plan.n_size ); adjust_carries<<< dim3(plan.m_size,1), dim3(WS, std::min<int>(plan.n_size, W45)) >>> (&plan.d_ptucheck, &plan.d_etvtilde, &plan.d_pybar, &plan.d_ezhat, plan.m_size, plan.n_size ); write_result<<< #if CUDA_SM >= 20 dim3((plan.m_size+2-1)/2,plan.n_size), #else dim3(plan.m_size,plan.n_size), #endif dim3(WS, W6)>>> (d_output, &plan.d_pybar, &plan.d_ezhat, &plan.d_ptucheck, &plan.d_etvtilde); cudaUnbindTexture(t_in); } void recfilter5(recfilter5_plan *plan, float *d_output, const float *d_input) { assert(plan); if(recfilter5_plan_R<1> *plan_R = dynamic_cast<recfilter5_plan_R<1>*>(plan)) recfilter5(*plan_R, d_output, d_input); else if(recfilter5_plan_R<2> *plan_R = dynamic_cast<recfilter5_plan_R<2>*>(plan)) recfilter5(*plan_R, d_output, d_input); else throw std::runtime_error("Bad plan for recfilter5"); } void recfilter5(recfilter5_plan *plan, float *d_inout) { recfilter5(plan, d_inout, d_inout); } template <int R> recfilter5_plan * recfilter5_create_plan(int width, int height, int rowstride, const Vector<float, R+1> &w, BorderType border_type, int border) { recfilter5_plan_R<R> *plan = new recfilter5_plan_R<R>; try { update_plan<R>(plan, width, height, rowstride, w, border_type, border); load_plan(*plan); } catch(...) { delete plan; throw; } return plan; } void calc_borders(int *left, int *top, int *right, int *bottom, int w, int h, int border) { if(border > 0) { *left = border*32; *top = border*32; *right = (border+1)*32-(w%32); *bottom = (border+1)*32-(h%32); } else { *left = *top = 0; *right = 32-(w%32); if(*right == 32) *right = 0; *bottom = 32-(h%32); if(*bottom == 32) *bottom = 0; } } template <int R> void update_plan(recfilter5_plan *_plan, int width, int height, int rowstride, const Vector<float, R+1> &w, BorderType border_type, int border) { assert(_plan); recfilter5_plan_R<R> *plan = dynamic_cast<recfilter5_plan_R<R> *>(_plan); if(plan == NULL) throw std::invalid_argument("Can't change recfilter's plan order"); const int B = 32; int old_border = plan->border, old_width = plan->width, old_height = plan->height; if(old_width!=width || old_height!=height) { // let's do this first to at least have a passable strong // exception guarantee (this has more chance to blow up) cudaArray *a_in = NULL; cudaChannelFormatDesc ccd = cudaCreateChannelDesc<float>(); cudaMallocArray(&a_in, &ccd, width, height); check_cuda_error("cudaMallocArray"); try { if(plan->a_in) { cudaFreeArray(plan->a_in); check_cuda_error("cudaFreeArray"); plan->a_in = NULL; } } catch(...) 
{ cudaFreeArray(a_in); throw; } plan->a_in = a_in; } if(plan->weights != w) { Matrix<float,R,R> Ir = identity<float,R,R>(); Matrix<float,B,R> Zbr = zeros<float,B,R>(); Matrix<float,R,B> Zrb = zeros<float,R,B>(); Matrix<float,B,B> Ib = identity<float,B,B>(); // depends on weight plan->weights = w; plan->AFP_T = fwd(Ir, Zrb, w); plan->ARE_T = rev(Zrb, Ir, w); plan->AFB_T = fwd(Zbr, Ib, w); plan->ARB_T = rev(Ib, Zbr, w); plan->AbF_T = tail<R>(plan->AFP_T); plan->AbR_T = head<R>(plan->ARE_T); plan->AbF = transp(plan->AbF_T); plan->AbR = transp(plan->AbR_T); plan->HARB_AFP_T = plan->AFP_T*head<R>(plan->ARB_T); plan->HARB_AFP = transp(plan->HARB_AFP_T); plan->ARB_AFP_T = plan->AFP_T*plan->ARB_T; plan->TAFB = transp(tail<R>(plan->AFB_T)); plan->HARB_AFB = transp(plan->AFB_T*head<R>(plan->ARB_T)); } int bleft, bright, btop, bbottom; calc_borders(&bleft, &btop, &bright, &bbottom, width, height, border); // depends on width and border if(old_border != border || old_width != width) { plan->m_size = (width+bleft+bright+WS-1)/WS, plan->last_m = (bleft+width-1)/WS; plan->width = width; plan->inv_width = 1.f/width; } // depends on height and border if(old_border != border || old_height != height) { plan->n_size = (height+btop+bbottom+WS-1)/WS; plan->last_n = (btop+height-1)/WS; plan->height = height; plan->inv_height = 1.f/height; } // depends on width, height and border if(old_border!=border || old_width!=width || old_height!=height) { // TODO: provide strong exception guarantee of previous data // in case of any of these blowing up. plan->d_pybar.resize(plan->n_size*plan->m_size); plan->d_ezhat.resize(plan->n_size*plan->m_size); plan->d_ptucheck.resize(plan->n_size*plan->m_size); plan->d_etvtilde.resize(plan->n_size*plan->m_size); } // depends on rowstride plan->rowstride = rowstride; // depends on border plan->border_type = border_type; plan->border = border; } template recfilter5_plan * recfilter5_create_plan<1>(int width, int height, int rowstride, const Vector<float, 1+1> &w, BorderType border_type, int border); template recfilter5_plan * recfilter5_create_plan<2>(int width, int height, int rowstride, const Vector<float, 2+1> &w, BorderType border_type, int border); void free(recfilter5_plan *plan) { if(g_loaded_plan_in_gpu == plan) g_loaded_plan_in_gpu = NULL; delete plan; } } // namespace rod
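In the recfilter pair, the texture and array plumbing is the part hipify touches most: cudaCreateChannelDesc, cudaMallocArray, cudaMemcpy2DToArray, cudaBindTextureToArray and the cudaAddressMode enums all map 1:1 onto hip-prefixed equivalents, as the .hip side shows. Below is a minimal standalone sketch of the array staging done by update_plan and recfilter5, with made-up sizes and without the texture binding; it is not the library's code, just the same two runtime calls in isolation.

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int width = 64, height = 48, rowstride = 80;  // rowstride >= width, in elements

    // Row-strided linear buffer standing in for d_input.
    float* d_img = nullptr;
    cudaMalloc(&d_img, rowstride * height * sizeof(float));
    cudaMemset(d_img, 0, rowstride * height * sizeof(float));

    // Allocate a 2D float cudaArray, as update_plan does when the size changes.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaArray_t a_in = nullptr;
    cudaMallocArray(&a_in, &desc, width, height);

    // Source pitch and copied width are both given in bytes; height is in rows.
    cudaMemcpy2DToArray(a_in, 0, 0, d_img,
                        rowstride * sizeof(float),   // spitch
                        width * sizeof(float),       // width of the copied region
                        height,                      // number of rows
                        cudaMemcpyDeviceToDevice);

    printf("copy status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFreeArray(a_in);
    cudaFree(d_img);
    return 0;
}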
846cad61ca3e40fb3440878256d6a113fa258536.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "sim.h" #include "utils.h" #include "mravenec.h" __global__ void sim_tickProgram(Level& level); __global__ void sim_updateTile(Level& level); // gets called once per Simulation tick void Simulation::tick(const dim3 blocks, const dim3 threads, const int unitCount) { _doStep = false; // sequentially called CUDA kernels will be executed in series // _level is in managed memory, so thats okay too if (unitCount > 0) sim_tickProgram __kernel_call((unitCount + 511) / 512, 512) (*_level); sim_updateTile __kernel_call(blocks, threads) (*_level); _ticks++; } __global__ void sim_tickProgram(Level& level) { int mravenec_i = blockIdx.x * 512 + threadIdx.x; if (mravenec_i >= 0 && mravenec_i < level.unitCap) { Mravenec& mravenec = level.mravenci[mravenec_i]; if (mravenec.alive()) mravenec.stepProgram(level); } } __global__ void sim_updateTile(Level& level) { int X = blockIdx.x * 32 + threadIdx.x; int Y = blockIdx.y * 32 + threadIdx.y; const float dT = 0.010; Tile* tile_ptr = level.tileAt(X, Y); Tile tile; if (tile_ptr != nullptr) { tile = *tile_ptr; Mravenec *m = level.mravenecAt(tile); if (m != nullptr) { if (m->posx == X && m->posy == Y) { // conduct temperature from the ant to the tile its standing on float a = sqrt(level.config.ant_conductivity * level.config.ground_conductivity); int dQ = int_amount(a * dT * (tile.temperature - m->temperature), tile.random); m->temperature += dQ; tile.temperature -= dQ; } } float local_cond = 0.f; switch (tile.type) { case TileType::Mravenec: case TileType::Food: case TileType::Void: local_cond = level.config.ground_conductivity; break; case TileType::Wall: local_cond = level.config.wall_conductivity; break; } // temperature conductivity { float dQ = 0.0f; for (int i = 0; i < 4; i++) { Tile* t = level.tileAt(X + dx[i], Y + dy[i]); float neighbor_conductivity = level.config.ground_conductivity; int neighbor_temp = level.config.ambient_temp; if (t != nullptr) { neighbor_temp = t->temperature; if (t->type == TileType::Wall) { neighbor_conductivity = level.config.wall_conductivity; } } dQ += sqrt(local_cond * neighbor_conductivity) * dT * (neighbor_temp - tile.temperature); } tile.temperature += int_amount(dQ, tile.random); } // food growth { int grow = int_amount(level.config.food_growth_speed, tile.random); int step = (int)level.config.food_growth_step; if (grow != 0) { if (tile.type == TileType::Void) { tile.type = TileType::Food; tile.state = 0; } if (tile.type == TileType::Food && tile.state < level.config.food_growth_max) tile.state += step <= 1 ? 
step : step / 2 + (rnd_next(tile.random) % step); } } // egg growth if (tile.type == TileType::Egg) { int growth = int_amount(level.config.egg_growth, tile.random); if (int(m->eggGrowth) + growth > 255) tile.type = TileType::Mravenec; else m->eggGrowth += growth; } // movement //if (tile.type == TileType::Void || tile.type == TileType::Food) { Mravenec* want_go_here[] = { 0, 0, 0, 0 }; int wgh_count = 0; // see, who wants to interfere with this tile for (int d = 0; d < 4; d++) { Tile* sousedni = level.tileAt(X + dx[d], Y + dy[d]); if (sousedni && sousedni->type == TileType::Mravenec) { Mravenec* m = level.mravenecAt(*sousedni); if (m && dir_back(d) == m->direction()) switch (m->decision()) { case Decision::LayEgg: case Decision::MoveAhead: want_go_here[d] = m; wgh_count++; break; } } } if (wgh_count > 0) { // decide on one int winner_i = 1 + (rnd_next(tile.random) % wgh_count); int d = 0; for (; winner_i; d++) if (want_go_here[d] != nullptr) winner_i--; Mravenec& winner = *(want_go_here[d - 1]); // perform their decided action switch (winner.decision()) { case Decision::MoveAhead: switch (tile.type) { case TileType::Food: winner.energy = min(255, winner.energy + (int(tile.state) & 0xff)); tile.type = TileType::Void; no_break; case TileType::Void: tile.type = TileType::Mravenec; tile.state = &winner - level.mravenci; winner.posx = X; winner.posy = Y; winner.decision(Decision::Success); break; } break; case Decision::LayEgg: switch (tile.type) { case TileType::Void: Mravenec* egg = level.allocUnit(); if (!egg) break; char* egg_start = (char*)egg; for (int i = 0; i < sizeof(Mravenec); i++) egg_start[i] = 0; tile.type = TileType::Egg; winner.copyMutatedProgramTo(egg, level.config.gene_change_prob, tile.random); egg->posx = X; egg->posy = Y; egg->alive(true); egg->temperature = winner.temperature / 2; winner.temperature /= 2; int transfer_e = min(winner.dynmemAt(winner.memPtr), winner.energy); egg->energy = transfer_e; winner.energy -= transfer_e; tile.state = egg - level.mravenci; break; case TileType::Egg: int seg_start = rnd_next(tile.random) % UNIT_PROGMEM_IRCOUNT; int seg_end = rnd_next(tile.random) % UNIT_PROGMEM_IRCOUNT; winner.copyMutatedProgramTo(m, level.config.gene_change_prob, tile.random, seg_start, seg_end); break; } break; default: break; } // leave others be - no interaction is to be interpreted as a failure } } } __syncthreads(); if (tile_ptr) { Mravenec* m = level.mravenecAt(tile); // move out condition if (tile.type == TileType::Mravenec && m && (m->posx != X || m->posy != Y)) { tile.state = m->dynmemAt(0); tile.type = TileType::Void; } // death condition if ((tile.type == TileType::Mravenec || tile.type == TileType::Egg) && m && (m->energy <= level.config.energy_death_th || m->temperature <= level.config.temp_death_th)) { tile.state = 1 << 8 | m->energy; tile.type = TileType::Food; level.freeUnit(m); } *tile_ptr = tile; } //printf("exit "); }
846cad61ca3e40fb3440878256d6a113fa258536.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include "sim.h" #include "utils.h" #include "mravenec.h" __global__ void sim_tickProgram(Level& level); __global__ void sim_updateTile(Level& level); // gets called once per Simulation tick void Simulation::tick(const dim3 blocks, const dim3 threads, const int unitCount) { _doStep = false; // sequentially called CUDA kernels will be executed in series // _level is in managed memory, so thats okay too if (unitCount > 0) sim_tickProgram __kernel_call((unitCount + 511) / 512, 512) (*_level); sim_updateTile __kernel_call(blocks, threads) (*_level); _ticks++; } __global__ void sim_tickProgram(Level& level) { int mravenec_i = blockIdx.x * 512 + threadIdx.x; if (mravenec_i >= 0 && mravenec_i < level.unitCap) { Mravenec& mravenec = level.mravenci[mravenec_i]; if (mravenec.alive()) mravenec.stepProgram(level); } } __global__ void sim_updateTile(Level& level) { int X = blockIdx.x * 32 + threadIdx.x; int Y = blockIdx.y * 32 + threadIdx.y; const float dT = 0.010; Tile* tile_ptr = level.tileAt(X, Y); Tile tile; if (tile_ptr != nullptr) { tile = *tile_ptr; Mravenec *m = level.mravenecAt(tile); if (m != nullptr) { if (m->posx == X && m->posy == Y) { // conduct temperature from the ant to the tile its standing on float a = sqrt(level.config.ant_conductivity * level.config.ground_conductivity); int dQ = int_amount(a * dT * (tile.temperature - m->temperature), tile.random); m->temperature += dQ; tile.temperature -= dQ; } } float local_cond = 0.f; switch (tile.type) { case TileType::Mravenec: case TileType::Food: case TileType::Void: local_cond = level.config.ground_conductivity; break; case TileType::Wall: local_cond = level.config.wall_conductivity; break; } // temperature conductivity { float dQ = 0.0f; for (int i = 0; i < 4; i++) { Tile* t = level.tileAt(X + dx[i], Y + dy[i]); float neighbor_conductivity = level.config.ground_conductivity; int neighbor_temp = level.config.ambient_temp; if (t != nullptr) { neighbor_temp = t->temperature; if (t->type == TileType::Wall) { neighbor_conductivity = level.config.wall_conductivity; } } dQ += sqrt(local_cond * neighbor_conductivity) * dT * (neighbor_temp - tile.temperature); } tile.temperature += int_amount(dQ, tile.random); } // food growth { int grow = int_amount(level.config.food_growth_speed, tile.random); int step = (int)level.config.food_growth_step; if (grow != 0) { if (tile.type == TileType::Void) { tile.type = TileType::Food; tile.state = 0; } if (tile.type == TileType::Food && tile.state < level.config.food_growth_max) tile.state += step <= 1 ? 
step : step / 2 + (rnd_next(tile.random) % step); } } // egg growth if (tile.type == TileType::Egg) { int growth = int_amount(level.config.egg_growth, tile.random); if (int(m->eggGrowth) + growth > 255) tile.type = TileType::Mravenec; else m->eggGrowth += growth; } // movement //if (tile.type == TileType::Void || tile.type == TileType::Food) { Mravenec* want_go_here[] = { 0, 0, 0, 0 }; int wgh_count = 0; // see, who wants to interfere with this tile for (int d = 0; d < 4; d++) { Tile* sousedni = level.tileAt(X + dx[d], Y + dy[d]); if (sousedni && sousedni->type == TileType::Mravenec) { Mravenec* m = level.mravenecAt(*sousedni); if (m && dir_back(d) == m->direction()) switch (m->decision()) { case Decision::LayEgg: case Decision::MoveAhead: want_go_here[d] = m; wgh_count++; break; } } } if (wgh_count > 0) { // decide on one int winner_i = 1 + (rnd_next(tile.random) % wgh_count); int d = 0; for (; winner_i; d++) if (want_go_here[d] != nullptr) winner_i--; Mravenec& winner = *(want_go_here[d - 1]); // perform their decided action switch (winner.decision()) { case Decision::MoveAhead: switch (tile.type) { case TileType::Food: winner.energy = min(255, winner.energy + (int(tile.state) & 0xff)); tile.type = TileType::Void; no_break; case TileType::Void: tile.type = TileType::Mravenec; tile.state = &winner - level.mravenci; winner.posx = X; winner.posy = Y; winner.decision(Decision::Success); break; } break; case Decision::LayEgg: switch (tile.type) { case TileType::Void: Mravenec* egg = level.allocUnit(); if (!egg) break; char* egg_start = (char*)egg; for (int i = 0; i < sizeof(Mravenec); i++) egg_start[i] = 0; tile.type = TileType::Egg; winner.copyMutatedProgramTo(egg, level.config.gene_change_prob, tile.random); egg->posx = X; egg->posy = Y; egg->alive(true); egg->temperature = winner.temperature / 2; winner.temperature /= 2; int transfer_e = min(winner.dynmemAt(winner.memPtr), winner.energy); egg->energy = transfer_e; winner.energy -= transfer_e; tile.state = egg - level.mravenci; break; case TileType::Egg: int seg_start = rnd_next(tile.random) % UNIT_PROGMEM_IRCOUNT; int seg_end = rnd_next(tile.random) % UNIT_PROGMEM_IRCOUNT; winner.copyMutatedProgramTo(m, level.config.gene_change_prob, tile.random, seg_start, seg_end); break; } break; default: break; } // leave others be - no interaction is to be interpreted as a failure } } } __syncthreads(); if (tile_ptr) { Mravenec* m = level.mravenecAt(tile); // move out condition if (tile.type == TileType::Mravenec && m && (m->posx != X || m->posy != Y)) { tile.state = m->dynmemAt(0); tile.type = TileType::Void; } // death condition if ((tile.type == TileType::Mravenec || tile.type == TileType::Egg) && m && (m->energy <= level.config.energy_death_th || m->temperature <= level.config.temp_death_th)) { tile.state = 1 << 8 | m->energy; tile.type = TileType::Food; level.freeUnit(m); } *tile_ptr = tile; } //printf("exit "); }
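The sim.cu/sim.hip pair differs only in its includes because every launch goes through a project-defined __kernel_call macro that hipify cannot see into, and hipcc's clang front end accepts the triple-chevron launch syntax anyway. The macro's real definition is not part of this file; the one below is an assumed stand-in, used only so the sketch compiles under nvcc.

#include <cuda_runtime.h>
#include <cstdio>

// Assumed definition for illustration; the project's actual macro lives in a
// header that is not part of this file pair.
#define __kernel_call(grid, block) <<<(grid), (block)>>>

__global__ void fill(int* data, int n, int value)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = value;
}

int main()
{
    const int n = 1024;
    int* d = nullptr;
    cudaMalloc(&d, n * sizeof(int));

    // Expands to fill<<<(n + 255) / 256, 256>>>(d, n, 7);
    fill __kernel_call((n + 255) / 256, 256) (d, n, 7);

    cudaDeviceSynchronize();
    printf("fill status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d);
    return 0;
}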
151e50878e9b841edb2e5a68643c12606fd77f0c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_bool.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformBoolSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformBool<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformBool<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_BOOL_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformBool<X,Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo, length); z[xOffset] = OpType::op(x[xOffset], params); } } else { for 
(Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo, length); auto zOffset = shape::getIndexOffset(i, zShapeInfo, length); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformBool<X,Z>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformBoolSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformBool(...) failed"); } template<typename X, typename Z> void TransformBool<X,Z>::exec(int opNum, void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } template<typename X, typename Z> template <typename OpType> void TransformBool<X,Z>::exec(void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformBool, , LIBND4J_TYPES, BOOL_TYPES); } }
151e50878e9b841edb2e5a68643c12606fd77f0c.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_bool.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformBoolSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformBool<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformBool<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_BOOL_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformBool<X,Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo, length); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, 
xShapeInfo, length); auto zOffset = shape::getIndexOffset(i, zShapeInfo, length); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformBool<X,Z>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { transformBoolSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformBool(...) failed"); } template<typename X, typename Z> void TransformBool<X,Z>::exec(int opNum, void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } template<typename X, typename Z> template <typename OpType> void TransformBool<X,Z>::exec(void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformBool, , LIBND4J_TYPES, BOOL_TYPES); } }
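transformCuda above takes a fast path when both buffers have a positive element-wise stride and the same ordering: a plain grid-stride loop in which each thread advances by gridDim.x * blockDim.x, so any launch configuration covers the whole length. The sketch below shows just that loop shape with a toy predicate and made-up sizes; it does not reproduce libnd4j's OpType or shape-info machinery.

#include <cuda_runtime.h>
#include <cstdio>

// Grid-stride elementwise "bool op": z[i] = (x[i] > 0).
__global__ void greater_than_zero(const float* x, bool* z, long long length)
{
    long long tid   = blockIdx.x * (long long)blockDim.x + threadIdx.x;
    long long total = (long long)gridDim.x * blockDim.x;
    for (long long i = tid; i < length; i += total)
        z[i] = x[i] > 0.0f;
}

int main()
{
    const long long n = 1 << 20;
    float* d_x = nullptr;
    bool*  d_z = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_z, n * sizeof(bool));
    cudaMemset(d_x, 0, n * sizeof(float));

    // The grid is much smaller than n; the stride loop still covers every element.
    greater_than_zero<<<256, 256>>>(d_x, d_z, n);
    cudaDeviceSynchronize();
    printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_x);
    cudaFree(d_z);
    return 0;
}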
167595e40455e9b073ab0c98fce061df5fa2d0e3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void KernelPrintInts(const int* p, int len)
{
    for (int i = 0; i < len; ++i) {
        printf("%d\n", p[i]);
    }
}
167595e40455e9b073ab0c98fce061df5fa2d0e3.cu
#include "includes.h"

__global__ void KernelPrintInts(const int* p, int len)
{
    for (int i = 0; i < len; ++i) {
        printf("%d\n", p[i]);
    }
}
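KernelPrintInts is never launched in this pair, so the .hip and .cu versions differ only by the hipify banner and the injected hip/hip_runtime.h include. When a launch site is present, hipify also rewrites the triple-chevron syntax into hipLaunchKernelGGL and maps the runtime API calls, as the stripe_init pair that follows shows. Below is a small hypothetical host driver (not part of this record) with the corresponding HIP forms noted in comments; the buffer names and sizes are made up for illustration.

#include <cstdio>
#include <cuda_runtime.h>   // hipify: #include "hip/hip_runtime.h"

__global__ void KernelPrintInts(const int* p, int len) {
    for (int i = 0; i < len; ++i) printf("%d\n", p[i]);
}

int main() {
    const int n = 4;
    const int host[n] = {1, 2, 3, 4};
    int* dev = nullptr;
    cudaMalloc((void**)&dev, n * sizeof(int));                          // hipify: hipMalloc
    cudaMemcpy(dev, host, n * sizeof(int), cudaMemcpyHostToDevice);     // hipify: hipMemcpy / hipMemcpyHostToDevice
    KernelPrintInts<<<1, 1>>>(dev, n);
    // hipify: hipLaunchKernelGGL(KernelPrintInts, dim3(1), dim3(1), 0, 0, dev, n);
    cudaDeviceSynchronize();                                            // hipify: hipDeviceSynchronize
    cudaFree(dev);                                                      // hipify: hipFree
    return 0;
}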
stripe_init.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/utilities/block_utils.cuh> #include <hipcub/hipcub.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/copy.h> #include <thrust/execution_policy.h> namespace cudf { namespace io { namespace orc { namespace gpu { struct comp_in_out { uint8_t const* in_ptr; size_t in_size; uint8_t* out_ptr; size_t out_size; }; struct compressed_stream_s { CompressedStreamInfo info; comp_in_out ctl; }; // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData( CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks uint8_t const* cur = s->info.compressed_data; uint8_t const* end = cur + s->info.compressed_data_size; uint8_t* uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t max_uncompressed_block_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size; device_span<uint8_t const>* init_in_ctl = nullptr; device_span<uint8_t>* init_out_ctl = nullptr; block_len >>= 1; cur += block_header_size; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; max_uncompressed_block_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_in_ctl = (s->info.copy_in_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &s->info.copy_in_ctl[num_uncompressed_blocks] : nullptr; init_out_ctl = (s->info.copy_out_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? 
&s->info.copy_out_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_in_ctl = (s->info.dec_in_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_in_ctl[num_compressed_blocks] : nullptr; init_out_ctl = (s->info.dec_out_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_out_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_in_ctl) { s->ctl = {cur, block_len, uncompressed + max_uncompressed_size, uncompressed_size}; } __syncwarp(); if (init_in_ctl && lane_id == 0) { *init_in_ctl = {s->ctl.in_ptr, s->ctl.in_size}; *init_out_ctl = {s->ctl.out_ptr, s->ctl.out_size}; } cur += block_len; max_uncompressed_size += uncompressed_size; max_uncompressed_block_size = max(max_uncompressed_block_size, uncompressed_size); } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; s->info.max_uncompressed_block_size = max_uncompressed_block_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks uint8_t const* cur = s->info.compressed_data; uint8_t const* end = cur + s->info.compressed_data_size; auto dec_out = s->info.dec_out_ctl; auto dec_result = s->info.dec_res; uint8_t* uncompressed_actual = s->info.uncompressed_data; uint8_t* uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += block_header_size; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } uint32_t const dst_size = dec_out[num_compressed_blocks].size(); uncompressed_size_est = shuffle((lane_id == 0) ? dst_size : 0); uint32_t const bytes_written = dec_result[num_compressed_blocks].bytes_written; uncompressed_size_actual = shuffle((lane_id == 0) ? 
bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Calculates the order of index streams based on the index types present in the column. * * @param index_types_bitmap The bitmap of index types showing which index streams are present * * @return The order of index streams */ static auto __device__ index_order_from_index_types(uint32_t index_types_bitmap) { constexpr std::array full_order = {CI_PRESENT, CI_DATA, CI_DATA2}; std::array<uint32_t, full_order.size()> partial_order; thrust::copy_if(thrust::seq, full_order.cbegin(), full_order.cend(), partial_order.begin(), [index_types_bitmap] __device__(auto index_type) { // Check if the index type is present return index_types_bitmap & (1 << index_type); }); return partial_order; } /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s, uint8_t const* const start, uint8_t const* const end) { constexpr uint32_t pb_rowindexentry_id = ProtofType::FIXEDLEN + 8; auto const stream_order = index_order_from_index_types(s->chunk.skip_count); uint8_t const* cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0; uint32_t idx_id = 0; uint32_t pos_end = 0; uint32_t ci_id = CI_NUM_STREAMS; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? 
*cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == ProtofType::FIXED64) cur += 8; else if (v == ProtofType::FIXED32) cur += 4; else if (v == ProtofType::VARINT) state = SKIP_VARINT; else if (v == ProtofType::FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: // Start of a new entry; determine the stream index types ci_id = stream_order[idx_id++]; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups) { uint8_t const* index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? 
s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { uint8_t const* start = s->strm_info[ci_id].compressed_data; uint8_t const* cur = start; uint8_t const* end = cur + s->strm_info[ci_id].compressed_data_size; auto dec_result = s->strm_info[ci_id].dec_res.data(); uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len; if (cur + block_header_size > end || cur + block_header_size >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += block_header_size; auto const is_uncompressed = static_cast<bool>(block_len & 1); block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += dec_result->bytes_written; dec_result++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] rowidx_stride Row index stride * @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed * value */ // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s* const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != nullptr); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { auto const num_rows = (use_base_stride) ? rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows; auto const start_row = (use_base_stride) ? 
i * rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows; row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row; } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } template <int block_size> __global__ void __launch_bounds__(block_size) gpu_reduce_pushdown_masks(device_span<orc_column_device_view const> orc_columns, device_2dspan<rowgroup_rows const> rowgroup_bounds, device_2dspan<size_type> set_counts) { using BlockReduce = hipcub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; auto const column_id = blockIdx.x; auto const rowgroup_id = blockIdx.y; auto const column = orc_columns[column_id]; auto const t = threadIdx.x; auto const use_child_rg = column.type().id() == type_id::LIST; auto const rg = rowgroup_bounds[rowgroup_id][column_id + (use_child_rg ? 1 : 0)]; if (column.pushdown_mask == nullptr) { // All elements are valid if the null mask is not present if (t == 0) { set_counts[rowgroup_id][column_id] = rg.size(); } return; }; size_type count = 0; static constexpr size_type bits_per_word = sizeof(bitmask_type) * 8; for (auto row = t * bits_per_word + rg.begin; row < rg.end; row += block_size * bits_per_word) { auto const begin_bit = row; auto const end_bit = min(static_cast<size_type>(row + bits_per_word), rg.end); auto const mask_len = end_bit - begin_bit; auto const mask_word = cudf::detail::get_mask_offset_word(column.pushdown_mask, 0, row, end_bit) & ((1 << mask_len) - 1); count += __popc(mask_word); } count = BlockReduce(temp_storage).Sum(count); if (t == 0) { set_counts[rowgroup_id][column_id] = count; } } void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuParseCompressedStripeData), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams, compression_block_size, log2maxcr); } void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block hipLaunchKernelGGL(( gpuPostDecompressionReassemble), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info, num_streams); } void __host__ ParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block hipLaunchKernelGGL(( gpuParseRowGroupIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride, use_base_stride); } void __host__ reduce_pushdown_masks(device_span<orc_column_device_view const> columns, 
                                    device_2dspan<rowgroup_rows const> rowgroups,
                                    device_2dspan<cudf::size_type> valid_counts,
                                    rmm::cuda_stream_view stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid(columns.size(), rowgroups.size().first);  // 1 rowgroup per block
  hipLaunchKernelGGL((gpu_reduce_pushdown_masks<128>), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
                     columns, rowgroups, valid_counts);
}

}  // namespace gpu
}  // namespace orc
}  // namespace io
}  // namespace cudf
stripe_init.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_gpu.hpp" #include <cudf/io/orc_types.hpp> #include <io/utilities/block_utils.cuh> #include <cub/cub.cuh> #include <rmm/cuda_stream_view.hpp> #include <thrust/copy.h> #include <thrust/execution_policy.h> namespace cudf { namespace io { namespace orc { namespace gpu { struct comp_in_out { uint8_t const* in_ptr; size_t in_size; uint8_t* out_ptr; size_t out_size; }; struct compressed_stream_s { CompressedStreamInfo info; comp_in_out ctl; }; // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData( CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; } __syncthreads(); if (strm_id < num_streams) { // Walk through the compressed blocks uint8_t const* cur = s->info.compressed_data; uint8_t const* end = cur + s->info.compressed_data_size; uint8_t* uncompressed = s->info.uncompressed_data; size_t max_uncompressed_size = 0; uint32_t max_uncompressed_block_size = 0; uint32_t num_compressed_blocks = 0; uint32_t num_uncompressed_blocks = 0; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size; device_span<uint8_t const>* init_in_ctl = nullptr; device_span<uint8_t>* init_out_ctl = nullptr; block_len >>= 1; cur += block_header_size; if (block_len > block_size || cur + block_len > end) { // Fatal num_compressed_blocks = 0; max_uncompressed_size = 0; max_uncompressed_block_size = 0; break; } // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual // uncompressed size and avoid waste due to block size alignment For now, rely on the max // compression ratio to limit waste for the most extreme cases (small single-block streams) uncompressed_size = (is_uncompressed) ? block_len : (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr : block_size; if (is_uncompressed) { if (uncompressed_size <= 32) { // For short blocks, copy the uncompressed data to output if (uncompressed && max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size && lane_id < uncompressed_size) { uncompressed[max_uncompressed_size + lane_id] = cur[lane_id]; } } else { init_in_ctl = (s->info.copy_in_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? &s->info.copy_in_ctl[num_uncompressed_blocks] : nullptr; init_out_ctl = (s->info.copy_out_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks) ? 
&s->info.copy_out_ctl[num_uncompressed_blocks] : nullptr; num_uncompressed_blocks++; } } else { init_in_ctl = (s->info.dec_in_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_in_ctl[num_compressed_blocks] : nullptr; init_out_ctl = (s->info.dec_out_ctl && num_compressed_blocks < s->info.num_compressed_blocks) ? &s->info.dec_out_ctl[num_compressed_blocks] : nullptr; num_compressed_blocks++; } if (!lane_id && init_in_ctl) { s->ctl = {cur, block_len, uncompressed + max_uncompressed_size, uncompressed_size}; } __syncwarp(); if (init_in_ctl && lane_id == 0) { *init_in_ctl = {s->ctl.in_ptr, s->ctl.in_size}; *init_out_ctl = {s->ctl.out_ptr, s->ctl.out_size}; } cur += block_len; max_uncompressed_size += uncompressed_size; max_uncompressed_block_size = max(max_uncompressed_block_size, uncompressed_size); } __syncwarp(); if (!lane_id) { s->info.num_compressed_blocks = num_compressed_blocks; s->info.num_uncompressed_blocks = num_uncompressed_blocks; s->info.max_uncompressed_size = max_uncompressed_size; s->info.max_uncompressed_block_size = max_uncompressed_block_size; } } __syncthreads(); if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info; } // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams) { __shared__ compressed_stream_s strm_g[4]; compressed_stream_s* const s = &strm_g[threadIdx.x / 32]; int strm_id = blockIdx.x * 4 + (threadIdx.x / 32); int lane_id = threadIdx.x % 32; if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id]; __syncthreads(); if (strm_id < num_streams && s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 && s->info.max_uncompressed_size > 0) { // Walk through the compressed blocks uint8_t const* cur = s->info.compressed_data; uint8_t const* end = cur + s->info.compressed_data_size; auto dec_out = s->info.dec_out_ctl; auto dec_result = s->info.dec_res; uint8_t* uncompressed_actual = s->info.uncompressed_data; uint8_t* uncompressed_estimated = uncompressed_actual; uint32_t num_compressed_blocks = 0; uint32_t max_compressed_blocks = s->info.num_compressed_blocks; while (cur + block_header_size < end) { uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0); auto const is_uncompressed = static_cast<bool>(block_len & 1); uint32_t uncompressed_size_est, uncompressed_size_actual; block_len >>= 1; cur += block_header_size; if (cur + block_len > end) { break; } if (is_uncompressed) { uncompressed_size_est = block_len; uncompressed_size_actual = block_len; } else { if (num_compressed_blocks > max_compressed_blocks) { break; } uint32_t const dst_size = dec_out[num_compressed_blocks].size(); uncompressed_size_est = shuffle((lane_id == 0) ? dst_size : 0); uint32_t const bytes_written = dec_result[num_compressed_blocks].bytes_written; uncompressed_size_actual = shuffle((lane_id == 0) ? 
bytes_written : 0); } // In practice, this should never happen with a well-behaved writer, as we would expect the // uncompressed size to always be equal to the compression block size except for the last // block if (uncompressed_actual < uncompressed_estimated) { // warp-level memmove for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) { uncompressed_actual[i] = uncompressed_estimated[i]; } } cur += block_len; num_compressed_blocks += 1 - is_uncompressed; uncompressed_estimated += uncompressed_size_est; uncompressed_actual += uncompressed_size_actual; } // Update info with actual uncompressed size if (!lane_id) { size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data; // Set uncompressed size to zero if there were any errors strm_info[strm_id].max_uncompressed_size = (num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0; } } } /** * @brief Shared mem state for gpuParseRowGroupIndex */ struct rowindex_state_s { ColumnDesc chunk; uint32_t rowgroup_start; uint32_t rowgroup_end; int is_compressed; uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2 CompressedStreamInfo strm_info[2]; RowGroup rowgroups[128]; uint32_t compressed_offset[128][2]; }; enum row_entry_state_e { NOT_FOUND = 0, GET_LENGTH, SKIP_VARINT, SKIP_FIXEDLEN, STORE_INDEX0, STORE_INDEX1, STORE_INDEX2, }; /** * @brief Calculates the order of index streams based on the index types present in the column. * * @param index_types_bitmap The bitmap of index types showing which index streams are present * * @return The order of index streams */ static auto __device__ index_order_from_index_types(uint32_t index_types_bitmap) { constexpr std::array full_order = {CI_PRESENT, CI_DATA, CI_DATA2}; std::array<uint32_t, full_order.size()> partial_order; thrust::copy_if(thrust::seq, full_order.cbegin(), full_order.cend(), partial_order.begin(), [index_types_bitmap] __device__(auto index_type) { // Check if the index type is present return index_types_bitmap & (1 << index_type); }); return partial_order; } /** * @brief Decode a single row group index entry * * @param[in,out] s row group index state * @param[in] start start position in byte stream * @param[in] end end of byte stream * @return bytes consumed */ static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s, uint8_t const* const start, uint8_t const* const end) { constexpr uint32_t pb_rowindexentry_id = ProtofType::FIXEDLEN + 8; auto const stream_order = index_order_from_index_types(s->chunk.skip_count); uint8_t const* cur = start; row_entry_state_e state = NOT_FOUND; uint32_t length = 0; uint32_t idx_id = 0; uint32_t pos_end = 0; uint32_t ci_id = CI_NUM_STREAMS; while (cur < end) { uint32_t v = 0; for (uint32_t l = 0; l <= 28; l += 7) { uint32_t c = (cur < end) ? 
*cur++ : 0; v |= (c & 0x7f) << l; if (c <= 0x7f) break; } switch (state) { case NOT_FOUND: if (v == pb_rowindexentry_id) { state = GET_LENGTH; } else { v &= 7; if (v == ProtofType::FIXED64) cur += 8; else if (v == ProtofType::FIXED32) cur += 4; else if (v == ProtofType::VARINT) state = SKIP_VARINT; else if (v == ProtofType::FIXEDLEN) state = SKIP_FIXEDLEN; } break; case SKIP_VARINT: state = NOT_FOUND; break; case SKIP_FIXEDLEN: cur += v; state = NOT_FOUND; break; case GET_LENGTH: if (length == 0) { length = (uint32_t)(cur + v - start); state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry // entry) } else { pos_end = min((uint32_t)(cur + v - start), length); state = STORE_INDEX0; } break; case STORE_INDEX0: // Start of a new entry; determine the stream index types ci_id = stream_order[idx_id++]; if (s->is_compressed) { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v; if (cur >= start + pos_end) return length; state = STORE_INDEX1; break; } else { if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0; // Fall through to STORE_INDEX1 for uncompressed (always block0) } case STORE_INDEX1: if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v; if (cur >= start + pos_end) return length; state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY && s->chunk.encoding_kind != DICTIONARY_V2 && (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT || s->chunk.type_kind == DOUBLE)) ? STORE_INDEX0 : STORE_INDEX2; break; case STORE_INDEX2: if (ci_id < CI_PRESENT) { // Boolean columns have an extra byte to indicate the position of the bit within the byte s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v; } if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++; if (cur >= start + pos_end) return length; state = STORE_INDEX0; break; } } return (uint32_t)(end - start); } /** * @brief Decode row group index entries * * @param[in,out] s row group index state * @param[in] num_rowgroups Number of index entries to read */ static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups) { uint8_t const* index_data = s->chunk.streams[CI_INDEX]; int index_data_len = s->chunk.strm_len[CI_INDEX]; for (int i = 0; i < num_rowgroups; i++) { s->row_index_entry[0][0] = 0; s->row_index_entry[0][1] = 0; s->row_index_entry[1][0] = 0; s->row_index_entry[1][1] = 0; s->row_index_entry[2][0] = 0; s->row_index_entry[2][1] = 0; if (index_data_len > 0) { int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len); index_data += len; index_data_len = max(index_data_len - len, 0); for (int j = 0; j < 2; j++) { s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j]; s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j]; s->compressed_offset[i][j] = s->row_index_entry[0][j]; } } } s->chunk.streams[CI_INDEX] = index_data; s->chunk.strm_len[CI_INDEX] = index_data_len; } /** * @brief Translate block+offset compressed position into an uncompressed offset * * @param[in,out] s row group index state * @param[in] ci_id index to convert (CI_DATA or CI_DATA2) * @param[in] num_rowgroups Number of index entries * @param[in] t thread id */ static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s, int ci_id, int num_rowgroups, int t) { int32_t strm_len = s->chunk.strm_len[ci_id]; if (strm_len > 0) { int32_t compressed_offset = (t < num_rowgroups) ? 
s->compressed_offset[t][ci_id] : 0; if (compressed_offset > 0) { uint8_t const* start = s->strm_info[ci_id].compressed_data; uint8_t const* cur = start; uint8_t const* end = cur + s->strm_info[ci_id].compressed_data_size; auto dec_result = s->strm_info[ci_id].dec_res.data(); uint32_t uncomp_offset = 0; for (;;) { uint32_t block_len; if (cur + block_header_size > end || cur + block_header_size >= start + compressed_offset) { break; } block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16); cur += block_header_size; auto const is_uncompressed = static_cast<bool>(block_len & 1); block_len >>= 1; cur += block_len; if (cur > end) { break; } if (is_uncompressed) { uncomp_offset += block_len; } else { uncomp_offset += dec_result->bytes_written; dec_result++; } } s->rowgroups[t].strm_offset[ci_id] += uncomp_offset; } } } /** * @brief Decode index streams * * @param[out] row_groups RowGroup device array [rowgroup][column] * @param[in] strm_info List of compressed streams (or NULL if uncompressed) * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] num_rowgroups Number of row groups * @param[in] rowidx_stride Row index stride * @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed * value */ // blockDim {128,1,1} __global__ void __launch_bounds__(128, 8) gpuParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride) { __shared__ __align__(16) rowindex_state_s state_g; rowindex_state_s* const s = &state_g; uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[chunk_id]; if (strm_info) { if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]]; if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]]; } uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups; s->rowgroup_start = s->chunk.rowgroup_id; s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk; s->is_compressed = (strm_info != nullptr); } __syncthreads(); while (s->rowgroup_start < s->rowgroup_end) { int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128); int rowgroup_size4, t4, t32; s->rowgroups[t].chunk_id = chunk_id; if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); } __syncthreads(); if (s->is_compressed) { // Convert the block + blk_offset pair into a raw offset into the decompressed stream if (s->chunk.strm_len[CI_DATA] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t); } if (s->chunk.strm_len[CI_DATA2] > 0) { gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t); } __syncthreads(); } rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t); t4 = t & 3; t32 = t >> 2; for (int i = t32; i < num_rowgroups; i += 32) { auto const num_rows = (use_base_stride) ? rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows; auto const start_row = (use_base_stride) ? 
i * rowidx_stride : row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row; for (int j = t4; j < rowgroup_size4; j += 4) { ((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] = ((volatile uint32_t*)&s->rowgroups[i])[j]; } row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows; // Updating in case of struct row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows; row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row; } __syncthreads(); if (t == 0) { s->rowgroup_start += num_rowgroups; } __syncthreads(); } } template <int block_size> __global__ void __launch_bounds__(block_size) gpu_reduce_pushdown_masks(device_span<orc_column_device_view const> orc_columns, device_2dspan<rowgroup_rows const> rowgroup_bounds, device_2dspan<size_type> set_counts) { using BlockReduce = cub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; auto const column_id = blockIdx.x; auto const rowgroup_id = blockIdx.y; auto const column = orc_columns[column_id]; auto const t = threadIdx.x; auto const use_child_rg = column.type().id() == type_id::LIST; auto const rg = rowgroup_bounds[rowgroup_id][column_id + (use_child_rg ? 1 : 0)]; if (column.pushdown_mask == nullptr) { // All elements are valid if the null mask is not present if (t == 0) { set_counts[rowgroup_id][column_id] = rg.size(); } return; }; size_type count = 0; static constexpr size_type bits_per_word = sizeof(bitmask_type) * 8; for (auto row = t * bits_per_word + rg.begin; row < rg.end; row += block_size * bits_per_word) { auto const begin_bit = row; auto const end_bit = min(static_cast<size_type>(row + bits_per_word), rg.end); auto const mask_len = end_bit - begin_bit; auto const mask_word = cudf::detail::get_mask_offset_word(column.pushdown_mask, 0, row, end_bit) & ((1 << mask_len) - 1); count += __popc(mask_word); } count = BlockReduce(temp_storage).Sum(count); if (t == 0) { set_counts[rowgroup_id][column_id] = count; } } void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t compression_block_size, uint32_t log2maxcr, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block gpuParseCompressedStripeData<<<dim_grid, dim_block, 0, stream.value()>>>( strm_info, num_streams, compression_block_size, log2maxcr); } void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block gpuPostDecompressionReassemble<<<dim_grid, dim_block, 0, stream.value()>>>(strm_info, num_streams); } void __host__ ParseRowGroupIndex(RowGroup* row_groups, CompressedStreamInfo* strm_info, ColumnDesc* chunks, uint32_t num_columns, uint32_t num_stripes, uint32_t num_rowgroups, uint32_t rowidx_stride, bool use_base_stride, rmm::cuda_stream_view stream) { dim3 dim_block(128, 1); dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block gpuParseRowGroupIndex<<<dim_grid, dim_block, 0, stream.value()>>>(row_groups, strm_info, chunks, num_columns, num_stripes, num_rowgroups, rowidx_stride, use_base_stride); } void __host__ reduce_pushdown_masks(device_span<orc_column_device_view const> columns, device_2dspan<rowgroup_rows const> rowgroups, device_2dspan<cudf::size_type> valid_counts, rmm::cuda_stream_view 
                                      stream)
{
  dim3 dim_block(128, 1);
  dim3 dim_grid(columns.size(), rowgroups.size().first);  // 1 rowgroup per block
  gpu_reduce_pushdown_masks<128>
    <<<dim_grid, dim_block, 0, stream.value()>>>(columns, rowgroups, valid_counts);
}

}  // namespace gpu
}  // namespace orc
}  // namespace io
}  // namespace cudf
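The stripe_init kernels above all decode the same 3-byte ORC compression block header: the three bytes form a little-endian value whose lowest bit flags an uncompressed ("original") block, and the remaining bits are the block length, which is why the code masks with & 1 and then shifts right by one. A host-side sketch of that decoding follows; the helper name decode_block_header and the sample bytes are hypothetical and only mirror the expressions used in the kernels.

#include <cstdint>
#include <cstdio>

struct BlockHeader {
    uint32_t block_len;    // payload length in bytes
    bool is_uncompressed;  // true when the "original" bit is set
};

// Mirrors: block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
//          is_uncompressed = block_len & 1;  block_len >>= 1;
BlockHeader decode_block_header(const uint8_t* cur) {
    uint32_t raw = cur[0] | (cur[1] << 8) | (cur[2] << 16);
    return BlockHeader{raw >> 1, static_cast<bool>(raw & 1)};
}

int main() {
    // raw = 0xC9 = 201: bit 0 set (uncompressed block), length 201 >> 1 = 100 bytes.
    const uint8_t header[3] = {0xC9, 0x00, 0x00};
    BlockHeader h = decode_block_header(header);
    printf("len=%u uncompressed=%d\n", h.block_len, h.is_uncompressed);
    return 0;
}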
d9565548fe494c865d33c6f63dba0d8f9f74b77f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Alexandre Maros - 2016 // // Cuda Matrix Multiplication with Shared Memory. // // nvcc cuda_matrix_shared.cu -o cs.o // // Implemented by Alexandre Maros for learning purposes. // A version of this code using Global Memory is in here: // https://github.com/alepmaros/cuda_matrix_multiplication // // Distributed under the MIT Lincese. #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <bitset> #include <time.h> #define NTHREADS_X 32 #define NTHREADS_Y 32 //#define N 3 //#define M 1 static const int input_size = 1000; static const int DEPTH = 20; static const int stride = 1; static const int padding = 0; static const int filter_size = 3; static const int output_size = (input_size - filter_size + 2*padding)/stride + 1; static const int comp_col = filter_size*DEPTH/32 + 1; //static const int depth_output_size = (DEPTH - DEPTH + 2*padding)/stride + 1; //#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y //A macro used for error checking in CUDA function calls //Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro. #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } __device__ int result_device[output_size]; inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { // fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } __device__ void Mat_ACC(unsigned int C[filter_size][comp_col], int filter_size, int column){ unsigned int counter = 0; unsigned int pop = filter_size*DEPTH; unsigned int threshold = filter_size*filter_size*DEPTH/2; __syncthreads(); for (int i = 0; i<filter_size; i++) { for (int j = 0; j<comp_col; j++) { if(pop<32){ counter += __popc(C[i][j]); counter = counter - (32 - pop); } else { counter += __popc(C[i][j]); pop = pop - 32; } } } if (counter > threshold){ result_device[column] = 1; } else{ result_device[column] = 0; } __syncthreads(); } __device__ void Mat_XNOR(unsigned int A[filter_size][comp_col], unsigned int B[filter_size][comp_col], unsigned int C[filter_size][comp_col], int column){ __syncthreads(); for (unsigned int i=0; i<filter_size; i++){ for(unsigned int j=0; j<comp_col; j++){ C[i][j] = ~((A[i][j] ^ B[i][j])); } } Mat_ACC(C, filter_size, column); __syncthreads(); } __global__ void convolve(unsigned int i_fmap[][input_size*DEPTH], unsigned int filter[][filter_size*DEPTH], unsigned int output[][output_size], int padding, int stride){ // printf("entering conv function\n"); int column = blockIdx.x * blockDim.x + threadIdx.x; int line = blockIdx.y * blockDim.y + threadIdx.y; // printf("column = %d\n",column); // printf("line = %d\n",line); unsigned int pA_size, pB_size, pC_size; // unsigned int xA_size, xB_size, xC_size; unsigned int (*pA)[filter_size*DEPTH], (*pB)[filter_size*DEPTH], (*pC)[filter_size*DEPTH]; // unsigned int** pA = new (unsigned int*)[filter_size]; // unsigned int** pB = new (unsigned int*)[filter_size]; // unsigned int** pC = new (unsigned int*)[filter_size]; // for (int i=0; i<filter_size; i++){ // pA[i] = new (unsigned int*)[filter_size]; // pB[i] = new (unsigned int*)[filter_size]; // pC[i] = new (unsigned int*)[filter_size]; // } pA_size = filter_size*filter_size*DEPTH* sizeof(unsigned int); pB_size = filter_size*filter_size*DEPTH* sizeof(unsigned int); pC_size = filter_size*filter_size*DEPTH* sizeof(unsigned int); // xA_size = filter_size*comp_col * sizeof(unsigned int); // xB_size = 
filter_size*comp_col * sizeof(unsigned int); // xC_size = filter_size*comp_col * sizeof(unsigned int); // pA = (unsigned int **)malloc(pA_size); // pB = (unsigned int **)malloc(pB_size); // pC = (unsigned int **)malloc(pC_size); // printf("Before CUDAMALLOC function\n"); hipMalloc((void **) &pA, pA_size); hipMalloc((void **) &pB, pB_size); hipMalloc((void **) &pC, pC_size); // hipMalloc((void **) &xA, xA_size); // hipMalloc((void **) &xB, xB_size); // hipMalloc((void **) &xC, xC_size); // printf("After CUDAMALLOC function\n"); // printf("%u\n", filter[1][1]); // for (int fout_r = 0; fout_r<output_size; fout_r++) { for (int fout_c = 0; fout_c < output_size; fout_c++) { for (int fr = 0; fr < filter_size; fr++) { for (int fc = 0; fc < filter_size * DEPTH; fc++) { //take values from input matrix pB[fr][fc] = filter[fr][fc]; // printf("for loop\n"); } } for (int d = 0; d<DEPTH; d++) { for (int r = 0; r < filter_size; r++) { for (int c = 0; c < filter_size; c++) { pA[r][d*filter_size + c] = i_fmap[column * stride + r][d * input_size + fout_c*stride + c]; // printf("%u ", pA[r][d*filter_size + c]); } // printf("\n"); } // printf("\n"); } // memset(xA, 0, xA_size); // memset(xB, 0, xB_size); // memset(xC, 0, xC_size); Mat_XNOR(xA, xB, xC, column); // hipDeviceSynchronize(); // printf("After XNOR function\n"); // __syncthreads(); output[column][fout_c] = result_device[column]; // __syncthreads(); } // } // pB = filter[0]; // printf("Before XNOR function\n"); // printf("result_conv=%d\n", output[0][0*DEPTH + 0]); hipFree(pA); hipFree(pB); hipFree(pC); } int main(){ // printf("pA = %d", A[0][0][0]); // dim3 numBlocks(1,1,1); // dim3 threadsPerBlock(N,N,DEPTH); //hipDeviceSynchronize(); // MatAdd<<<numBlocks,threadsPerBlock>>>(pA,pB,pC); // hipDeviceSynchronize(); // unsigned int *a, *b, *c; // unsigned int *a_f, *b_f, *c_f; srand(time(0)); unsigned int A[input_size][input_size*DEPTH]; unsigned int B[filter_size][filter_size*DEPTH]; unsigned int C[output_size][output_size]; unsigned int xA[filter_size][comp_col], xB[filter_size][comp_col], xC[filter_size][comp_col]; int a_nlines, a_ncolumns; int b_nlines, b_ncolumns; // int c_nlines, c_ncolumns; hipEvent_t start, stop; gpuErrchk( hipEventCreate(&start) ); gpuErrchk( hipEventCreate(&stop) ); // scanf("%d", &a_nlines); scanf("%d", &a_ncolumns); scanf("%d", &b_nlines); scanf("%d", &b_ncolumns); // unsigned int (*tA)[input_size*DEPTH], (*tB)[filter_size*DEPTH], (*tC)[output_size]; gpuErrchk( hipMalloc((void**)&tA, (input_size*input_size * DEPTH)*sizeof(unsigned int))); gpuErrchk( hipMalloc((void**)&tB, (filter_size*filter_size * DEPTH)*sizeof(unsigned int))); gpuErrchk( hipMalloc((void**)&tC, (output_size*output_size)*sizeof(unsigned int))); memset(C, 0, output_size*output_size*sizeof(unsigned int)); //generate random input for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < input_size; row++){ for (int col = 0; col < input_size; col++){ // scanf("%u", &a[i * a_ncolumns + j]); A[row][col+layer*input_size] = rand() % 2; } } } for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < filter_size; row++){ for (int col = 0; col < filter_size; col++){ B[row][col+layer*filter_size] = rand() % 2; } } } printf("Input A = \n"); for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < input_size; row++){ for (int col = 0; col < input_size; col++){ printf("%u ", A[row][col+layer*input_size]); } printf("\n"); } printf("\n"); } printf("Filter B = \n"); for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < 
filter_size; row++){ for (int col = 0; col < filter_size; col++){ printf("%u ", B[row][col+layer*filter_size]); } printf("\n"); } printf("\n"); } //compress 32 bits into 1 unsigned int unsigned int pop = filter_size*DEPTH; for (int i = 0; i < filter_size; i++) { for (int k = 0; k < comp_col; k++) { if(pop<=32) { for (int j = 0; j < pop; j++) { xA[i][k] = xA[i][k] << 1; xA[i][k] = xA[i][k] | A[i][j]; } } else{ for (int j = 0; j < 32; j++) { xA[i][k] = xA[i][k] << 1; xA[i][k] = xA[i][k] | A[i][j]; } pop -= 32; } } } pop = filter_size*DEPTH; for (int i = 0; i < filter_size; i++) { for (int k = 0; k < comp_col; k++) { if(pop<=32) { for (int j = 0; j < pop; j++) { xB[i][k] = xB[i][k] << 1; xB[i][k] = xB[i][k] | B[i][j]; } } else{ for (int j = 0; j < 32; j++) { xB[i][k] = xB[i][k] << 1; xB[i][k] = xB[i][k] | B[i][j]; } pop -= 32; } } } // gpuErrchk( hipMemcpy(pA, a_f, pA_size, hipMemcpyHostToDevice) ); // gpuErrchk( hipMemcpy(pB, b_f, pB_size, hipMemcpyHostToDevice) ); // gpuErrchk( hipMemcpy(pC, c_f, pC_size, hipMemcpyHostToDevice) ); hipMemcpy(tA, A, (input_size*input_size*DEPTH)*sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(tB, B, (filter_size*filter_size*DEPTH)*sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(tC, C, (output_size*output_size)*sizeof(unsigned int), hipMemcpyHostToDevice); // gpuErrchk( hipMemcpy(fmap, i_fmap, fmap_size, hipMemcpyHostToDevice) ); // gpuErrchk( hipMemcpy(fil, filter, filter_size, hipMemcpyHostToDevice) ); // gpuErrchk( hipMemcpy(result, result_return, int_size, hipMemcpyHostToDevice) ); // dim3 threadsPerBlock(NTHREADS_X, 1); //(int)::ceil(NTHREADS_Y*(double)N/(32.0*32)) // dim3 NumberofBlocks((int) ::ceil( (double)N/NTHREADS_X), // (int) ::ceil( (double)N/(32*NTHREADS_Y))); // int result_host[1]; // hipMemcpyToSymbol(result_device, &result_host,sizeof(result_host), hipMemcpyHostToDevice); dim3 threadsPerBlock(1,1); //(int)::ceil(NTHREADS_Y*(double)N/(32.0*32)) // dim3 NumberofBlocks(1,1); dim3 NumberofBlocks(output_size,1); hipEventRecord(start); // Mat_XNOR<<<NumberofBlocks,threadsPerBlock>>>(pA,pB,pC); // hipDeviceSynchronize(); hipLaunchKernelGGL(( convolve), dim3(NumberofBlocks),dim3(threadsPerBlock), 0, 0, tA, tB, tC, padding, stride); // gpuErrchk( hipDeviceSynchronize() ); // gpuErrchk( hipMemcpy(c_f, pC, pC_size, hipMemcpyDeviceToHost) ); hipMemcpy(C, tC, (output_size*output_size)*sizeof(unsigned int), hipMemcpyDeviceToHost); // hipMemcpyFromSymbol(&result_host, &result_device, sizeof(result_host), 0, hipMemcpyDeviceToHost); // hipMemcpyFromSymbol(result_host, result_device, sizeof(result_host), 0); // gpuErrchk( hipMemcpy(result_return, result, int_size, hipMemcpyDeviceToHost) ); // gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipEventRecord(stop) ); gpuErrchk( hipEventSynchronize(stop) ); printf("C = \n"); // for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < output_size; row++) { for (int col = 0; col < output_size; col++) { printf("%u ", C[row][col]); } printf("\n"); } printf("\n"); // printf("result_host = %d\n", result_host[0]); #ifdef __NO_OUTPUT // print Matrix A float // printf("A = \n"); // for (i = 0; i < a_nlines; i++) // { // for (j = 0; j < pA_ncolumns; j++) // { // printf("%u ", a_f[i * pA_ncolumns + j]); // } // printf("\n"); // } // printf("\n"); // // print Matrix B float // printf("B = \n"); // for (i = 0; i < b_nlines; i++) // { // for (j = 0; j < pB_ncolumns; j++) // { // printf("%u ", b_f[i * pB_ncolumns + j]); // } // printf("\n"); // } // printf("\n"); // // print Matrix C float // printf("C = 
\n"); // for (i = 0; i < c_nlines; i++) // { // for (j = 0; j < pC_ncolumns; j++) // { // printf("%u ", C[i * pC_ncolumns + j]); // } // printf("\n"); // } #endif #ifdef __TIME float milliseconds = 0; gpuErrchk( hipEventElapsedTime(&milliseconds, start, stop) ); printf("Time shared:"); printf("%.5f\n", milliseconds); #endif // free(a); free(b); free(c); hipFree(tA); hipFree(tB); hipFree(tC); printf("\n"); return 0; }
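The convolution kernel above packs {0,1} activations and weights into 32-bit words, replaces multiply-accumulate with XNOR followed by __popc, and binarizes the result against a threshold of half the receptive field; the counter - (32 - pop) correction compensates for unused tail bits in the last packed word. One common way to read the match count is as a +1/-1 dot product, dot = 2*matches - width. The sketch below shows that core step on a single 32-bit word; the kernel name xnorDot32 and the sample inputs are assumptions for illustration, not part of the file above.

#include <cstdio>
#include <cuda_runtime.h>

// Binary dot product of two 32-element {0,1} vectors packed into one word each:
// matches = popcount(~(a ^ b)); interpreting bits as +1/-1 gives dot = 2*matches - 32.
__global__ void xnorDot32(unsigned int a, unsigned int b, int* out) {
    unsigned int matches = __popc(~(a ^ b));
    *out = 2 * (int)matches - 32;
}

int main() {
    int* dOut = nullptr;
    int hOut = 0;
    cudaMalloc((void**)&dOut, sizeof(int));
    // Identical words -> 32 matching bits -> dot product +32.
    xnorDot32<<<1, 1>>>(0xF0F0F0F0u, 0xF0F0F0F0u, dOut);
    cudaMemcpy(&hOut, dOut, sizeof(int), cudaMemcpyDeviceToHost);
    printf("dot = %d\n", hOut);   // prints 32
    cudaFree(dOut);
    return 0;
}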
d9565548fe494c865d33c6f63dba0d8f9f74b77f.cu
// // Alexandre Maros - 2016 // // Cuda Matrix Multiplication with Shared Memory. // // nvcc cuda_matrix_shared.cu -o cs.o // // Implemented by Alexandre Maros for learning purposes. // A version of this code using Global Memory is in here: // https://github.com/alepmaros/cuda_matrix_multiplication // // Distributed under the MIT Lincese. #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <bitset> #include <time.h> #define NTHREADS_X 32 #define NTHREADS_Y 32 //#define N 3 //#define M 1 static const int input_size = 1000; static const int DEPTH = 20; static const int stride = 1; static const int padding = 0; static const int filter_size = 3; static const int output_size = (input_size - filter_size + 2*padding)/stride + 1; static const int comp_col = filter_size*DEPTH/32 + 1; //static const int depth_output_size = (DEPTH - DEPTH + 2*padding)/stride + 1; //#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y //A macro used for error checking in CUDA function calls //Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro. #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } __device__ int result_device[output_size]; inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { // fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } __device__ void Mat_ACC(unsigned int C[filter_size][comp_col], int filter_size, int column){ unsigned int counter = 0; unsigned int pop = filter_size*DEPTH; unsigned int threshold = filter_size*filter_size*DEPTH/2; __syncthreads(); for (int i = 0; i<filter_size; i++) { for (int j = 0; j<comp_col; j++) { if(pop<32){ counter += __popc(C[i][j]); counter = counter - (32 - pop); } else { counter += __popc(C[i][j]); pop = pop - 32; } } } if (counter > threshold){ result_device[column] = 1; } else{ result_device[column] = 0; } __syncthreads(); } __device__ void Mat_XNOR(unsigned int A[filter_size][comp_col], unsigned int B[filter_size][comp_col], unsigned int C[filter_size][comp_col], int column){ __syncthreads(); for (unsigned int i=0; i<filter_size; i++){ for(unsigned int j=0; j<comp_col; j++){ C[i][j] = ~((A[i][j] ^ B[i][j])); } } Mat_ACC(C, filter_size, column); __syncthreads(); } __global__ void convolve(unsigned int i_fmap[][input_size*DEPTH], unsigned int filter[][filter_size*DEPTH], unsigned int output[][output_size], int padding, int stride){ // printf("entering conv function\n"); int column = blockIdx.x * blockDim.x + threadIdx.x; int line = blockIdx.y * blockDim.y + threadIdx.y; // printf("column = %d\n",column); // printf("line = %d\n",line); unsigned int pA_size, pB_size, pC_size; // unsigned int xA_size, xB_size, xC_size; unsigned int (*pA)[filter_size*DEPTH], (*pB)[filter_size*DEPTH], (*pC)[filter_size*DEPTH]; // unsigned int** pA = new (unsigned int*)[filter_size]; // unsigned int** pB = new (unsigned int*)[filter_size]; // unsigned int** pC = new (unsigned int*)[filter_size]; // for (int i=0; i<filter_size; i++){ // pA[i] = new (unsigned int*)[filter_size]; // pB[i] = new (unsigned int*)[filter_size]; // pC[i] = new (unsigned int*)[filter_size]; // } pA_size = filter_size*filter_size*DEPTH* sizeof(unsigned int); pB_size = filter_size*filter_size*DEPTH* sizeof(unsigned int); pC_size = filter_size*filter_size*DEPTH* sizeof(unsigned int); // xA_size = filter_size*comp_col * sizeof(unsigned int); // xB_size = filter_size*comp_col * sizeof(unsigned int); // xC_size = filter_size*comp_col * 
sizeof(unsigned int); // pA = (unsigned int **)malloc(pA_size); // pB = (unsigned int **)malloc(pB_size); // pC = (unsigned int **)malloc(pC_size); // printf("Before CUDAMALLOC function\n"); cudaMalloc((void **) &pA, pA_size); cudaMalloc((void **) &pB, pB_size); cudaMalloc((void **) &pC, pC_size); // cudaMalloc((void **) &xA, xA_size); // cudaMalloc((void **) &xB, xB_size); // cudaMalloc((void **) &xC, xC_size); // printf("After CUDAMALLOC function\n"); // printf("%u\n", filter[1][1]); // for (int fout_r = 0; fout_r<output_size; fout_r++) { for (int fout_c = 0; fout_c < output_size; fout_c++) { for (int fr = 0; fr < filter_size; fr++) { for (int fc = 0; fc < filter_size * DEPTH; fc++) { //take values from input matrix pB[fr][fc] = filter[fr][fc]; // printf("for loop\n"); } } for (int d = 0; d<DEPTH; d++) { for (int r = 0; r < filter_size; r++) { for (int c = 0; c < filter_size; c++) { pA[r][d*filter_size + c] = i_fmap[column * stride + r][d * input_size + fout_c*stride + c]; // printf("%u ", pA[r][d*filter_size + c]); } // printf("\n"); } // printf("\n"); } // memset(xA, 0, xA_size); // memset(xB, 0, xB_size); // memset(xC, 0, xC_size); Mat_XNOR(xA, xB, xC, column); // cudaThreadSynchronize(); // printf("After XNOR function\n"); // __syncthreads(); output[column][fout_c] = result_device[column]; // __syncthreads(); } // } // pB = filter[0]; // printf("Before XNOR function\n"); // printf("result_conv=%d\n", output[0][0*DEPTH + 0]); cudaFree(pA); cudaFree(pB); cudaFree(pC); } int main(){ // printf("pA = %d", A[0][0][0]); // dim3 numBlocks(1,1,1); // dim3 threadsPerBlock(N,N,DEPTH); //cudaDeviceSynchronize(); // MatAdd<<<numBlocks,threadsPerBlock>>>(pA,pB,pC); // cudaDeviceSynchronize(); // unsigned int *a, *b, *c; // unsigned int *a_f, *b_f, *c_f; srand(time(0)); unsigned int A[input_size][input_size*DEPTH]; unsigned int B[filter_size][filter_size*DEPTH]; unsigned int C[output_size][output_size]; unsigned int xA[filter_size][comp_col], xB[filter_size][comp_col], xC[filter_size][comp_col]; int a_nlines, a_ncolumns; int b_nlines, b_ncolumns; // int c_nlines, c_ncolumns; cudaEvent_t start, stop; gpuErrchk( cudaEventCreate(&start) ); gpuErrchk( cudaEventCreate(&stop) ); // scanf("%d", &a_nlines); scanf("%d", &a_ncolumns); scanf("%d", &b_nlines); scanf("%d", &b_ncolumns); // unsigned int (*tA)[input_size*DEPTH], (*tB)[filter_size*DEPTH], (*tC)[output_size]; gpuErrchk( cudaMalloc((void**)&tA, (input_size*input_size * DEPTH)*sizeof(unsigned int))); gpuErrchk( cudaMalloc((void**)&tB, (filter_size*filter_size * DEPTH)*sizeof(unsigned int))); gpuErrchk( cudaMalloc((void**)&tC, (output_size*output_size)*sizeof(unsigned int))); memset(C, 0, output_size*output_size*sizeof(unsigned int)); //generate random input for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < input_size; row++){ for (int col = 0; col < input_size; col++){ // scanf("%u", &a[i * a_ncolumns + j]); A[row][col+layer*input_size] = rand() % 2; } } } for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < filter_size; row++){ for (int col = 0; col < filter_size; col++){ B[row][col+layer*filter_size] = rand() % 2; } } } printf("Input A = \n"); for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < input_size; row++){ for (int col = 0; col < input_size; col++){ printf("%u ", A[row][col+layer*input_size]); } printf("\n"); } printf("\n"); } printf("Filter B = \n"); for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < filter_size; row++){ for (int col = 0; col < filter_size; col++){ 
printf("%u ", B[row][col+layer*filter_size]); } printf("\n"); } printf("\n"); } //compress 32 bits into 1 unsigned int unsigned int pop = filter_size*DEPTH; for (int i = 0; i < filter_size; i++) { for (int k = 0; k < comp_col; k++) { if(pop<=32) { for (int j = 0; j < pop; j++) { xA[i][k] = xA[i][k] << 1; xA[i][k] = xA[i][k] | A[i][j]; } } else{ for (int j = 0; j < 32; j++) { xA[i][k] = xA[i][k] << 1; xA[i][k] = xA[i][k] | A[i][j]; } pop -= 32; } } } pop = filter_size*DEPTH; for (int i = 0; i < filter_size; i++) { for (int k = 0; k < comp_col; k++) { if(pop<=32) { for (int j = 0; j < pop; j++) { xB[i][k] = xB[i][k] << 1; xB[i][k] = xB[i][k] | B[i][j]; } } else{ for (int j = 0; j < 32; j++) { xB[i][k] = xB[i][k] << 1; xB[i][k] = xB[i][k] | B[i][j]; } pop -= 32; } } } // gpuErrchk( cudaMemcpy(pA, a_f, pA_size, cudaMemcpyHostToDevice) ); // gpuErrchk( cudaMemcpy(pB, b_f, pB_size, cudaMemcpyHostToDevice) ); // gpuErrchk( cudaMemcpy(pC, c_f, pC_size, cudaMemcpyHostToDevice) ); cudaMemcpy(tA, A, (input_size*input_size*DEPTH)*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(tB, B, (filter_size*filter_size*DEPTH)*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(tC, C, (output_size*output_size)*sizeof(unsigned int), cudaMemcpyHostToDevice); // gpuErrchk( cudaMemcpy(fmap, i_fmap, fmap_size, cudaMemcpyHostToDevice) ); // gpuErrchk( cudaMemcpy(fil, filter, filter_size, cudaMemcpyHostToDevice) ); // gpuErrchk( cudaMemcpy(result, result_return, int_size, cudaMemcpyHostToDevice) ); // dim3 threadsPerBlock(NTHREADS_X, 1); //(int)std::ceil(NTHREADS_Y*(double)N/(32.0*32)) // dim3 NumberofBlocks((int) std::ceil( (double)N/NTHREADS_X), // (int) std::ceil( (double)N/(32*NTHREADS_Y))); // int result_host[1]; // cudaMemcpyToSymbol(result_device, &result_host,sizeof(result_host), cudaMemcpyHostToDevice); dim3 threadsPerBlock(1,1); //(int)std::ceil(NTHREADS_Y*(double)N/(32.0*32)) // dim3 NumberofBlocks(1,1); dim3 NumberofBlocks(output_size,1); cudaEventRecord(start); // Mat_XNOR<<<NumberofBlocks,threadsPerBlock>>>(pA,pB,pC); // cudaDeviceSynchronize(); convolve<<<NumberofBlocks,threadsPerBlock>>>(tA, tB, tC, padding, stride); // gpuErrchk( cudaDeviceSynchronize() ); // gpuErrchk( cudaMemcpy(c_f, pC, pC_size, cudaMemcpyDeviceToHost) ); cudaMemcpy(C, tC, (output_size*output_size)*sizeof(unsigned int), cudaMemcpyDeviceToHost); // cudaMemcpyFromSymbol(&result_host, &result_device, sizeof(result_host), 0, cudaMemcpyDeviceToHost); // cudaMemcpyFromSymbol(result_host, result_device, sizeof(result_host), 0); // gpuErrchk( cudaMemcpy(result_return, result, int_size, cudaMemcpyDeviceToHost) ); // gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaEventRecord(stop) ); gpuErrchk( cudaEventSynchronize(stop) ); printf("C = \n"); // for (int layer = 0; layer < DEPTH; layer++){ for (int row = 0; row < output_size; row++) { for (int col = 0; col < output_size; col++) { printf("%u ", C[row][col]); } printf("\n"); } printf("\n"); // printf("result_host = %d\n", result_host[0]); #ifdef __NO_OUTPUT // print Matrix A float // printf("A = \n"); // for (i = 0; i < a_nlines; i++) // { // for (j = 0; j < pA_ncolumns; j++) // { // printf("%u ", a_f[i * pA_ncolumns + j]); // } // printf("\n"); // } // printf("\n"); // // print Matrix B float // printf("B = \n"); // for (i = 0; i < b_nlines; i++) // { // for (j = 0; j < pB_ncolumns; j++) // { // printf("%u ", b_f[i * pB_ncolumns + j]); // } // printf("\n"); // } // printf("\n"); // // print Matrix C float // printf("C = \n"); // for (i = 0; i < c_nlines; i++) // { // for (j = 
0; j < pC_ncolumns; j++) // { // printf("%u ", C[i * pC_ncolumns + j]); // } // printf("\n"); // } #endif #ifdef __TIME float milliseconds = 0; gpuErrchk( cudaEventElapsedTime(&milliseconds, start, stop) ); printf("Time shared:"); printf("%.5f\n", milliseconds); #endif // free(a); free(b); free(c); cudaFree(tA); cudaFree(tB); cudaFree(tC); printf("\n"); return 0; }
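// --------------------------------------------------------------------------
// A minimal, self-contained sketch of the primitive the binary-convolution
// file above is built on: pack 0/1 values into 32-bit words, XNOR the packed
// words, count agreeing bits with __popc(), and binarize against a majority
// threshold. Names such as pack_bits and xnor_dot are illustrative only and
// are not taken from the file.
// --------------------------------------------------------------------------
#include <cstdio>

// Pack 32 binary values (0/1) into one 32-bit word, MSB first.
static unsigned int pack_bits(const unsigned int *bits)
{
    unsigned int word = 0;
    for (int i = 0; i < 32; ++i)
        word = (word << 1) | (bits[i] & 1u);
    return word;
}

// Count agreeing bit positions of two packed vectors with XNOR + popcount.
__global__ void xnor_dot(const unsigned int *a, const unsigned int *b,
                         int n_words, int n_bits, int *result)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        int matches = 0;
        for (int w = 0; w < n_words; ++w)
            matches += __popc(~(a[w] ^ b[w]));
        matches -= n_words * 32 - n_bits;   // discard padding bits (zero in both inputs)
        *result = (matches > n_bits / 2) ? 1 : 0;
    }
}

int main()
{
    const int n_bits  = 64;
    const int n_words = n_bits / 32;
    unsigned int bits_a[n_bits], bits_b[n_bits];
    for (int i = 0; i < n_bits; ++i) { bits_a[i] = i & 1u; bits_b[i] = i & 1u; }

    unsigned int h_a[n_words], h_b[n_words];
    for (int w = 0; w < n_words; ++w) {
        h_a[w] = pack_bits(bits_a + 32 * w);
        h_b[w] = pack_bits(bits_b + 32 * w);
    }

    unsigned int *d_a, *d_b;
    int *d_result, h_result = 0;
    cudaMalloc(&d_a, n_words * sizeof(unsigned int));
    cudaMalloc(&d_b, n_words * sizeof(unsigned int));
    cudaMalloc(&d_result, sizeof(int));
    cudaMemcpy(d_a, h_a, n_words * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n_words * sizeof(unsigned int), cudaMemcpyHostToDevice);

    xnor_dot<<<1, 1>>>(d_a, d_b, n_words, n_bits, d_result);
    cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    printf("binarized match: %d\n", h_result);   // identical vectors -> 1

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_result);
    return 0;
}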
99bb0332871cabcdda8ecee3f945c02c5a3cd20d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; int size = N * sizeof(float); // // TODO allocate device memory buffers on the GPU using hipMalloc // hipMalloc(&device_x, size); hipMalloc(&device_y, size); hipMalloc(&device_result, size); // start timing after allocation of device memory double startTime = CycleTimer::currentSeconds(); // // TODO copy input arrays to the GPU using hipMemcpy // hipMemcpy(device_x, xarray, size, hipMemcpyHostToDevice); hipMemcpy(device_y, yarray, size, hipMemcpyHostToDevice); // run kernel double myStartTime = CycleTimer::currentSeconds(); hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result); hipDeviceSynchronize(); double myEndTime = CycleTimer::currentSeconds(); // // TODO copy result from GPU using hipMemcpy // hipMemcpy(resultarray, device_result, size, hipMemcpyDeviceToHost); // end timing after result has been copied back into host memory double endTime = CycleTimer::currentSeconds(); double cpuStartTime = CycleTimer::currentSeconds(); for(int i = 0; i < N; ++i) { resultarray[i] = alpha * xarray[i] + yarray[i]; } double cpuEndTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; double myOverallDuration = myEndTime - myStartTime; double cpuOverallDuration = cpuEndTime - cpuStartTime; printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); printf("Execute: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * myOverallDuration, toBW(totalBytes, myOverallDuration)); printf("CPUtime: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * cpuOverallDuration, toBW(totalBytes, cpuOverallDuration)); printf("\n"); // TODO free memory buffers on the GPU hipFree(device_x); hipFree(device_y); hipFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
99bb0332871cabcdda8ecee3f945c02c5a3cd20d.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; int size = N * sizeof(float); // // TODO allocate device memory buffers on the GPU using cudaMalloc // cudaMalloc(&device_x, size); cudaMalloc(&device_y, size); cudaMalloc(&device_result, size); // start timing after allocation of device memory double startTime = CycleTimer::currentSeconds(); // // TODO copy input arrays to the GPU using cudaMemcpy // cudaMemcpy(device_x, xarray, size, cudaMemcpyHostToDevice); cudaMemcpy(device_y, yarray, size, cudaMemcpyHostToDevice); // run kernel double myStartTime = CycleTimer::currentSeconds(); saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result); cudaThreadSynchronize(); double myEndTime = CycleTimer::currentSeconds(); // // TODO copy result from GPU using cudaMemcpy // cudaMemcpy(resultarray, device_result, size, cudaMemcpyDeviceToHost); // end timing after result has been copied back into host memory double endTime = CycleTimer::currentSeconds(); double cpuStartTime = CycleTimer::currentSeconds(); for(int i = 0; i < N; ++i) { resultarray[i] = alpha * xarray[i] + yarray[i]; } double cpuEndTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; double myOverallDuration = myEndTime - myStartTime; double cpuOverallDuration = cpuEndTime - cpuStartTime; printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); printf("Execute: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * myOverallDuration, toBW(totalBytes, myOverallDuration)); printf("CPUtime: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * cpuOverallDuration, toBW(totalBytes, cpuOverallDuration)); printf("\n"); // TODO free memory buffers on the GPU cudaFree(device_x); cudaFree(device_y); cudaFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
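// --------------------------------------------------------------------------
// A small self-contained sketch of timing a kernel with CUDA events instead
// of a host-side timer, as an alternative to the CycleTimer measurements in
// the saxpy program above. The kernel, sizes, and launch shape here are
// illustrative and are not taken from the file.
// --------------------------------------------------------------------------
#include <cstdio>

__global__ void scale_kernel(int n, float a, float *x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1 << 20;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                       // enqueue start marker
    scale_kernel<<<(n + 255) / 256, 256>>>(n, 2.0f, d_x);
    cudaEventRecord(stop);                        // enqueue stop marker
    cudaEventSynchronize(stop);                   // wait until stop completes

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);       // elapsed GPU time in ms
    printf("kernel time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}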
ade4cd8f70f9c71bc812ee5d5d82e248193a55b2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Introduction to CUDA GPU programming
 *
 * https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
 */

#include <iostream>
#include <math.h>

// function to add the elements of two arrays
// __global__ tells the compiler that this function
// runs on the GPU. In CUDA a function with __global__
// is known as a kernel
//
__global__ void add(int n, float *x, float *y)
{
  for (int i = 0; i < n; i++)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20; // 1M elements

  float *x, *y;
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));

  // Old memory allocation
  //float *x = new float[N];
  //float *y = new float[N];

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Run kernel on 1M elements on the CPU
  //add(N, x, y);

  // launch with 1 GPU thread
  hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);

  // Wait for GPU to finish before accessing on host
  hipDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  //delete [] x;
  //delete [] y;
  hipFree(x);
  hipFree(y);

  return 0;
}
ade4cd8f70f9c71bc812ee5d5d82e248193a55b2.cu
/* Introduction to CUDA GPU programming
 *
 * https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
 */

#include <iostream>
#include <math.h>

// function to add the elements of two arrays
// __global__ tells the compiler that this function
// runs on the GPU. In CUDA a function with __global__
// is known as a kernel
//
__global__ void add(int n, float *x, float *y)
{
  for (int i = 0; i < n; i++)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20; // 1M elements

  float *x, *y;
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));

  // Old memory allocation
  //float *x = new float[N];
  //float *y = new float[N];

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Run kernel on 1M elements on the CPU
  //add(N, x, y);

  // launch with 1 GPU thread
  add<<<1, 1>>>(N, x, y);

  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  //delete [] x;
  //delete [] y;
  cudaFree(x);
  cudaFree(y);

  return 0;
}
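// --------------------------------------------------------------------------
// A hedged sketch of the usual next step after the single-thread add() above:
// launch many threads and use a grid-stride loop so any N is covered. The
// kernel name and block size are illustrative choices, not from the file.
// --------------------------------------------------------------------------
#include <iostream>
#include <math.h>

__global__ void add_parallel(int n, float *x, float *y)
{
  int index  = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)   // grid-stride loop
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1 << 20;
  float *x, *y;
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }

  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add_parallel<<<numBlocks, blockSize>>>(N, x, y);
  cudaDeviceSynchronize();

  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i] - 3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  cudaFree(x);
  cudaFree(y);
  return 0;
}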
55b5a856d0877a2c6f2a1d82d7b3128d00c00f4b.hip
// !!! This is a file automatically generated by hipify!!! // --------------------------------------------------------- // Copyright (c) 2016, Andy Zeng // // This file is part of the APC Vision Toolbox and is available // under the terms of the Simplified BSD License provided in // LICENSE. Please retain this notice and LICENSE if you use // this file (or any portion of it) in your project. // --------------------------------------------------------- #include "depth_utils.h" #include "ros/ros.h" #include "marvin_convnet/DetectObjects.h" #include "realsense_camera/StreamSensor.h" #include <opencv2/opencv.hpp> // Marvin #define DATATYPE 0 #include "marvin.hpp" std::string shelf_net_arch_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/net.json"; std::string tote_net_arch_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/net.json"; std::string shelf_net_weights_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/weights_shelf.marvin"; std::string tote_net_weights_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/weights_tote.marvin"; // Service modes and names std::string service_name; // Directory to read/write all RGB-D files and response maps std::string read_directory; // Global buffers for sensor data retrieval int frame_width = 640; int frame_height = 480; uint8_t * color_buffer = new uint8_t[frame_width * frame_height * 3]; uint8_t * HHA_buffer = new uint8_t[frame_width * frame_height * 3]; // Load Marvin FCN network architectures marvin::Net shelf_net(shelf_net_arch_filename); marvin::Net tote_net(tote_net_arch_filename); // Marvin responses StorageT* color_data_CPU = NULL; StorageT* HHA_data_CPU = NULL; StorageT* prob_CPU_StorageT = NULL; ComputeT* prob_CPU_ComputeT = NULL; ros::ServiceClient client_sensor; const int num_apc_objects = 39; std::string shelf_bin_ids = "ABCDEFGHIJKL"; // Service call bool srv_detect(marvin_convnet::DetectObjects::Request &req, marvin_convnet::DetectObjects::Response &res) { ROS_INFO("Recieved service request."); int bin_id = req.BinId; int frame_id = req.FrameId; res.FrameId = frame_id; // Get frame filenames std::ostringstream frame_prefix; frame_prefix << std::setw(6) << std::setfill('0') << frame_id; std::string color_frame_filename = "/frame-" + frame_prefix.str() + ".color.png"; std::string depth_frame_filename = "/frame-" + frame_prefix.str() + ".depth.png"; std::string raw_depth_frame_filename = "/raw/frame-" + frame_prefix.str() + ".depth.png"; std::string HHA_frame_filename = "/HHA/frame-" + frame_prefix.str() + ".HHA.png"; // Read color frame from disk cv::Mat color_frame = cv::imread(read_directory + color_frame_filename, CV_LOAD_IMAGE_COLOR); color_buffer = color_frame.data; cv::Mat HHA_frame = cv::imread(read_directory + HHA_frame_filename.c_str(), CV_LOAD_IMAGE_COLOR); HHA_buffer = HHA_frame.data; // Color: BGR format, mean subtracted for (int r = 0; r < frame_height; ++r) for (int c = 0; c < frame_width; ++c) { color_data_CPU[0 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[0 + 3 * (c + frame_width * r)]) - ComputeT(102.9801f)); // B color_data_CPU[1 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[1 + 3 * (c + frame_width * r)]) - ComputeT(115.9465f)); // G color_data_CPU[2 * frame_height * frame_width + r * frame_width + c] = 
CPUCompute2StorageT(ComputeT(color_buffer[2 + 3 * (c + frame_width * r)]) - ComputeT(122.7717f)); // R HHA_data_CPU[0 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(HHA_buffer[0 + 3 * (c + frame_width * r)]) - ComputeT(102.9801f)); // B HHA_data_CPU[1 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(HHA_buffer[1 + 3 * (c + frame_width * r)]) - ComputeT(115.9465f)); // G HHA_data_CPU[2 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(HHA_buffer[2 + 3 * (c + frame_width * r)]) - ComputeT(122.7717f)); // R } // Run forward pass through marvin FCN ROS_INFO("Forward Marvin to get segmentation results."); marvin::Response * rDataRGB; marvin::Response * rDataHHA; marvin::Response * rProb; if (bin_id == -1) { rDataRGB = tote_net.getResponse("data_RGB"); rDataHHA = tote_net.getResponse("data_HHA"); rProb = tote_net.getResponse("prob"); } else { rDataRGB = shelf_net.getResponse("data_RGB"); rDataHHA = shelf_net.getResponse("data_HHA"); rProb = shelf_net.getResponse("prob"); } hipMemcpy(rDataRGB->dataGPU, color_data_CPU, rDataRGB->numBytes(), hipMemcpyHostToDevice); hipMemcpy(rDataHHA->dataGPU, HHA_data_CPU, rDataHHA->numBytes(), hipMemcpyHostToDevice); if (bin_id == -1) tote_net.forward(); else shelf_net.forward(); hipMemcpy(prob_CPU_StorageT, rProb->dataGPU, rProb->numBytes(), hipMemcpyDeviceToHost); for (int i = 0; i < frame_height * frame_width * (num_apc_objects + 1); ++i) prob_CPU_ComputeT[i] = CPUStorage2ComputeT(prob_CPU_StorageT[i]); // Get full object list std::vector<std::string> all_object_names = {"background", "barkely_hide_bones", "cherokee_easy_tee_shirt", "clorox_utility_brush", "cloud_b_plush_bear", "command_hooks", "cool_shot_glue_sticks", "crayola_24_ct", "creativity_chenille_stems", "dasani_water_bottle", "dove_beauty_bar", "dr_browns_bottle_brush", "easter_turtle_sippy_cup", "elmers_washable_no_run_school_glue", "expo_dry_erase_board_eraser", "fiskars_scissors_red", "fitness_gear_3lb_dumbbell", "folgers_classic_roast_coffee", "hanes_tube_socks", "i_am_a_bunny_book", "jane_eyre_dvd", "kleenex_paper_towels", "kleenex_tissue_box", "kyjen_squeakin_eggs_plush_puppies", "laugh_out_loud_joke_book", "oral_b_toothbrush_green", "oral_b_toothbrush_red", "peva_shower_curtain_liner", "platinum_pets_dog_bowl", "rawlings_baseball", "rolodex_jumbo_pencil_cup", "safety_first_outlet_plugs", "scotch_bubble_mailer", "scotch_duct_tape", "soft_white_lightbulb", "staples_index_cards", "ticonderoga_12_pencils", "up_glucose_bottle", "womens_knit_gloves", "woods_extension_cord"}; std::vector<std::string> selected_object_names = req.ObjectNames; // Remove duplicates in selected object list std::sort(selected_object_names.begin(), selected_object_names.end()); selected_object_names.erase(std::unique(selected_object_names.begin(), selected_object_names.end()), selected_object_names.end()); // Loop through each object in selected list for (int selected_idx = 0; selected_idx < selected_object_names.size(); selected_idx++) { std::string curr_object_name = selected_object_names[selected_idx]; int curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name)); std::vector<ComputeT> predMap_object(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width); // Create a folder to save results std::ifstream file(read_directory + "/masks"); if (file.fail()) 
system(std::string("mkdir -p " + read_directory + "/masks").c_str()); // Write segmentation response maps to 16-bit grayscale png image std::string result_filename = read_directory + "/masks/frame-" + frame_prefix.str() + "." + all_object_names[curr_object_idx] + ".mask.png"; cv::Mat result_mat(frame_height, frame_width, CV_16UC1); for (size_t y = 0; y < frame_height; y++) for (size_t x = 0; x < frame_width; x++) { unsigned short depth_short = (unsigned short)(predMap_object[y * frame_width + x] * 65535); result_mat.at<unsigned short>(y, x) = depth_short; } std::vector<int> compression_params; compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); cv::imwrite(result_filename, result_mat, compression_params); } return true; } int main(int argc, char **argv) { // Setup ROS ros::init(argc, argv, "marvin_convnet", ros::init_options::AnonymousName); ros::NodeHandle n; ros::NodeHandle priv_nh("~"); // Get service parameters priv_nh.param("service_name", service_name, std::string("marvin_convnet")); priv_nh.param("read_directory", read_directory, std::string("")); // Assert parameters and create folder to save segmentation masks assert(!read_directory.empty()); system(std::string("mkdir -p " + read_directory).c_str()); // Start service ros::ServiceServer service_detect = n.advertiseService(service_name, srv_detect); // Connect to Realsense camera ROS_INFO("Reading data from directory: %s", read_directory.c_str()); // Setup Marvin ROS_INFO("Loading Marvin."); shelf_net.Malloc(marvin::Testing); tote_net.Malloc(marvin::Testing); shelf_net.loadWeights(shelf_net_weights_filename); tote_net.loadWeights(tote_net_weights_filename); color_data_CPU = new StorageT[frame_width * frame_height * 3]; HHA_data_CPU = new StorageT[frame_width * frame_height * 3]; prob_CPU_StorageT = new StorageT[frame_width * frame_height * (num_apc_objects + 1)]; prob_CPU_ComputeT = new ComputeT[frame_height * frame_width * (num_apc_objects + 1)]; ROS_INFO("Ready."); ros::spin(); return 0; }
55b5a856d0877a2c6f2a1d82d7b3128d00c00f4b.cu
// --------------------------------------------------------- // Copyright (c) 2016, Andy Zeng // // This file is part of the APC Vision Toolbox and is available // under the terms of the Simplified BSD License provided in // LICENSE. Please retain this notice and LICENSE if you use // this file (or any portion of it) in your project. // --------------------------------------------------------- #include "depth_utils.h" #include "ros/ros.h" #include "marvin_convnet/DetectObjects.h" #include "realsense_camera/StreamSensor.h" #include <opencv2/opencv.hpp> // Marvin #define DATATYPE 0 #include "marvin.hpp" std::string shelf_net_arch_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/net.json"; std::string tote_net_arch_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/net.json"; std::string shelf_net_weights_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/weights_shelf.marvin"; std::string tote_net_weights_filename = "/home/andyz/apc/toolbox/ros-packages/catkin_ws/src/marvin_convnet/models/competition/weights_tote.marvin"; // Service modes and names std::string service_name; // Directory to read/write all RGB-D files and response maps std::string read_directory; // Global buffers for sensor data retrieval int frame_width = 640; int frame_height = 480; uint8_t * color_buffer = new uint8_t[frame_width * frame_height * 3]; uint8_t * HHA_buffer = new uint8_t[frame_width * frame_height * 3]; // Load Marvin FCN network architectures marvin::Net shelf_net(shelf_net_arch_filename); marvin::Net tote_net(tote_net_arch_filename); // Marvin responses StorageT* color_data_CPU = NULL; StorageT* HHA_data_CPU = NULL; StorageT* prob_CPU_StorageT = NULL; ComputeT* prob_CPU_ComputeT = NULL; ros::ServiceClient client_sensor; const int num_apc_objects = 39; std::string shelf_bin_ids = "ABCDEFGHIJKL"; // Service call bool srv_detect(marvin_convnet::DetectObjects::Request &req, marvin_convnet::DetectObjects::Response &res) { ROS_INFO("Recieved service request."); int bin_id = req.BinId; int frame_id = req.FrameId; res.FrameId = frame_id; // Get frame filenames std::ostringstream frame_prefix; frame_prefix << std::setw(6) << std::setfill('0') << frame_id; std::string color_frame_filename = "/frame-" + frame_prefix.str() + ".color.png"; std::string depth_frame_filename = "/frame-" + frame_prefix.str() + ".depth.png"; std::string raw_depth_frame_filename = "/raw/frame-" + frame_prefix.str() + ".depth.png"; std::string HHA_frame_filename = "/HHA/frame-" + frame_prefix.str() + ".HHA.png"; // Read color frame from disk cv::Mat color_frame = cv::imread(read_directory + color_frame_filename, CV_LOAD_IMAGE_COLOR); color_buffer = color_frame.data; cv::Mat HHA_frame = cv::imread(read_directory + HHA_frame_filename.c_str(), CV_LOAD_IMAGE_COLOR); HHA_buffer = HHA_frame.data; // Color: BGR format, mean subtracted for (int r = 0; r < frame_height; ++r) for (int c = 0; c < frame_width; ++c) { color_data_CPU[0 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[0 + 3 * (c + frame_width * r)]) - ComputeT(102.9801f)); // B color_data_CPU[1 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[1 + 3 * (c + frame_width * r)]) - ComputeT(115.9465f)); // G color_data_CPU[2 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(color_buffer[2 + 3 * (c + frame_width * r)]) - 
ComputeT(122.7717f)); // R HHA_data_CPU[0 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(HHA_buffer[0 + 3 * (c + frame_width * r)]) - ComputeT(102.9801f)); // B HHA_data_CPU[1 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(HHA_buffer[1 + 3 * (c + frame_width * r)]) - ComputeT(115.9465f)); // G HHA_data_CPU[2 * frame_height * frame_width + r * frame_width + c] = CPUCompute2StorageT(ComputeT(HHA_buffer[2 + 3 * (c + frame_width * r)]) - ComputeT(122.7717f)); // R } // Run forward pass through marvin FCN ROS_INFO("Forward Marvin to get segmentation results."); marvin::Response * rDataRGB; marvin::Response * rDataHHA; marvin::Response * rProb; if (bin_id == -1) { rDataRGB = tote_net.getResponse("data_RGB"); rDataHHA = tote_net.getResponse("data_HHA"); rProb = tote_net.getResponse("prob"); } else { rDataRGB = shelf_net.getResponse("data_RGB"); rDataHHA = shelf_net.getResponse("data_HHA"); rProb = shelf_net.getResponse("prob"); } cudaMemcpy(rDataRGB->dataGPU, color_data_CPU, rDataRGB->numBytes(), cudaMemcpyHostToDevice); cudaMemcpy(rDataHHA->dataGPU, HHA_data_CPU, rDataHHA->numBytes(), cudaMemcpyHostToDevice); if (bin_id == -1) tote_net.forward(); else shelf_net.forward(); cudaMemcpy(prob_CPU_StorageT, rProb->dataGPU, rProb->numBytes(), cudaMemcpyDeviceToHost); for (int i = 0; i < frame_height * frame_width * (num_apc_objects + 1); ++i) prob_CPU_ComputeT[i] = CPUStorage2ComputeT(prob_CPU_StorageT[i]); // Get full object list std::vector<std::string> all_object_names = {"background", "barkely_hide_bones", "cherokee_easy_tee_shirt", "clorox_utility_brush", "cloud_b_plush_bear", "command_hooks", "cool_shot_glue_sticks", "crayola_24_ct", "creativity_chenille_stems", "dasani_water_bottle", "dove_beauty_bar", "dr_browns_bottle_brush", "easter_turtle_sippy_cup", "elmers_washable_no_run_school_glue", "expo_dry_erase_board_eraser", "fiskars_scissors_red", "fitness_gear_3lb_dumbbell", "folgers_classic_roast_coffee", "hanes_tube_socks", "i_am_a_bunny_book", "jane_eyre_dvd", "kleenex_paper_towels", "kleenex_tissue_box", "kyjen_squeakin_eggs_plush_puppies", "laugh_out_loud_joke_book", "oral_b_toothbrush_green", "oral_b_toothbrush_red", "peva_shower_curtain_liner", "platinum_pets_dog_bowl", "rawlings_baseball", "rolodex_jumbo_pencil_cup", "safety_first_outlet_plugs", "scotch_bubble_mailer", "scotch_duct_tape", "soft_white_lightbulb", "staples_index_cards", "ticonderoga_12_pencils", "up_glucose_bottle", "womens_knit_gloves", "woods_extension_cord"}; std::vector<std::string> selected_object_names = req.ObjectNames; // Remove duplicates in selected object list std::sort(selected_object_names.begin(), selected_object_names.end()); selected_object_names.erase(std::unique(selected_object_names.begin(), selected_object_names.end()), selected_object_names.end()); // Loop through each object in selected list for (int selected_idx = 0; selected_idx < selected_object_names.size(); selected_idx++) { std::string curr_object_name = selected_object_names[selected_idx]; int curr_object_idx = std::distance(all_object_names.begin(), find(all_object_names.begin(), all_object_names.end(), curr_object_name)); std::vector<ComputeT> predMap_object(prob_CPU_ComputeT + curr_object_idx * frame_height * frame_width, prob_CPU_ComputeT + (curr_object_idx + 1) * frame_height * frame_width); // Create a folder to save results std::ifstream file(read_directory + "/masks"); if (file.fail()) system(std::string("mkdir -p " + read_directory + "/masks").c_str()); // Write 
segmentation response maps to 16-bit grayscale png image std::string result_filename = read_directory + "/masks/frame-" + frame_prefix.str() + "." + all_object_names[curr_object_idx] + ".mask.png"; cv::Mat result_mat(frame_height, frame_width, CV_16UC1); for (size_t y = 0; y < frame_height; y++) for (size_t x = 0; x < frame_width; x++) { unsigned short depth_short = (unsigned short)(predMap_object[y * frame_width + x] * 65535); result_mat.at<unsigned short>(y, x) = depth_short; } std::vector<int> compression_params; compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); compression_params.push_back(9); cv::imwrite(result_filename, result_mat, compression_params); } return true; } int main(int argc, char **argv) { // Setup ROS ros::init(argc, argv, "marvin_convnet", ros::init_options::AnonymousName); ros::NodeHandle n; ros::NodeHandle priv_nh("~"); // Get service parameters priv_nh.param("service_name", service_name, std::string("marvin_convnet")); priv_nh.param("read_directory", read_directory, std::string("")); // Assert parameters and create folder to save segmentation masks assert(!read_directory.empty()); system(std::string("mkdir -p " + read_directory).c_str()); // Start service ros::ServiceServer service_detect = n.advertiseService(service_name, srv_detect); // Connect to Realsense camera ROS_INFO("Reading data from directory: %s", read_directory.c_str()); // Setup Marvin ROS_INFO("Loading Marvin."); shelf_net.Malloc(marvin::Testing); tote_net.Malloc(marvin::Testing); shelf_net.loadWeights(shelf_net_weights_filename); tote_net.loadWeights(tote_net_weights_filename); color_data_CPU = new StorageT[frame_width * frame_height * 3]; HHA_data_CPU = new StorageT[frame_width * frame_height * 3]; prob_CPU_StorageT = new StorageT[frame_width * frame_height * (num_apc_objects + 1)]; prob_CPU_ComputeT = new ComputeT[frame_height * frame_width * (num_apc_objects + 1)]; ROS_INFO("Ready."); ros::spin(); return 0; }
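// --------------------------------------------------------------------------
// A minimal sketch of the preprocessing pattern used in srv_detect() above:
// convert an interleaved (HWC) 8-bit BGR frame into a planar (CHW) float
// buffer with per-channel mean subtraction before copying it to the GPU.
// The per-channel means are the ones used above; the function and buffer
// names are illustrative stand-ins.
// --------------------------------------------------------------------------
#include <cstdint>
#include <cstdlib>

void hwc_to_chw_mean_sub(const uint8_t *hwc, float *chw,
                         int height, int width, const float mean[3])
{
    for (int c = 0; c < 3; ++c)
        for (int r = 0; r < height; ++r)
            for (int col = 0; col < width; ++col)
                chw[c * height * width + r * width + col] =
                    static_cast<float>(hwc[3 * (r * width + col) + c]) - mean[c];
}

int main()
{
    const int h = 480, w = 640;
    const float mean[3] = {102.9801f, 115.9465f, 122.7717f};   // B, G, R means
    uint8_t *frame = (uint8_t *)calloc(h * w * 3, sizeof(uint8_t));
    float *input   = (float *)malloc(h * w * 3 * sizeof(float));
    hwc_to_chw_mean_sub(frame, input, h, w, mean);
    // input can now be copied to a planar network input blob with cudaMemcpy.
    free(frame);
    free(input);
    return 0;
}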
adbfa27a501d60de9100e66beb5e84d8c6cd78aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/scale_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ScaleForward(const int n, const Dtype* in, const Dtype* scale, const int scale_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index]; } } template <typename Dtype> __global__ void ScaleBiasForward(const int n, const Dtype* in, const Dtype* scale, const Dtype* bias, const int scale_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index] + bias[scale_index]; } } template <typename Dtype> void ScaleLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); if (bottom[0] == top[0]) { // in-place computation; need to store bottom data before overwriting it. // Note that this is only necessary for Backward; we could skip this if not // doing Backward, but Caffe currently provides no way of knowing whether // we'll need to do Backward at the time of the Forward call. const bool scale_param = (bottom.size() == 1); if (!scale_param || (scale_param && this->param_propagate_down_[0])) { caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), temp_.mutable_gpu_data()); } } const Dtype* scale_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); if (bias_layer_) { const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data(); ScaleBiasForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_, top_data); } else { ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data); } } template <typename Dtype> void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (bias_layer_ && this->param_propagate_down_[this->param_propagate_down_.size() - 1]) { bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_); } const bool scale_param = (bottom.size() == 1); Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1]; if ((!scale_param && propagate_down[1]) || (scale_param && this->param_propagate_down_[0])) { const Dtype* top_diff = top[0]->gpu_diff(); const bool in_place = (bottom[0] == top[0]); const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data(); // Hack: store big eltwise product in bottom[0] diff, except in the special // case where this layer itself does the eltwise product, in which case we // can store it directly in the scale diff, and we're done. // If we're computing in-place (and not doing eltwise computation), this // hack doesn't work and we store the product in temp_. const bool is_eltwise = (bottom[0]->count() == scale->count()); Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() : (in_place ? 
temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff())); caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); if (!is_eltwise) { Dtype* sum_result = NULL; if (inner_dim_ == 1) { sum_result = product; } else if (sum_result_.count() == 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; caffe_gpu_dot(inner_dim_, product, sum_mult, &result); *scale_diff += result; } else { caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff); } } else { const Dtype* sum_mult = sum_multiplier_.gpu_data(); sum_result = (outer_dim_ == 1) ? scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, Dtype(1), product, sum_mult, Dtype(0), sum_result); } if (outer_dim_ != 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); if (scale_dim_ == 1) { Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result); *scale_diff += result; } else { caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff); } } else { Dtype* scale_diff = scale->mutable_gpu_diff(); caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, Dtype(1), sum_result, sum_mult, Dtype(scale_param), scale_diff); } } } } if (propagate_down[0]) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* scale_data = scale->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer); } // namespace caffe
adbfa27a501d60de9100e66beb5e84d8c6cd78aa.cu
#include <cfloat> #include <vector> #include "caffe/layers/scale_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ScaleForward(const int n, const Dtype* in, const Dtype* scale, const int scale_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index]; } } template <typename Dtype> __global__ void ScaleBiasForward(const int n, const Dtype* in, const Dtype* scale, const Dtype* bias, const int scale_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index] + bias[scale_index]; } } template <typename Dtype> void ScaleLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); if (bottom[0] == top[0]) { // in-place computation; need to store bottom data before overwriting it. // Note that this is only necessary for Backward; we could skip this if not // doing Backward, but Caffe currently provides no way of knowing whether // we'll need to do Backward at the time of the Forward call. const bool scale_param = (bottom.size() == 1); if (!scale_param || (scale_param && this->param_propagate_down_[0])) { caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), temp_.mutable_gpu_data()); } } const Dtype* scale_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); if (bias_layer_) { const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data(); ScaleBiasForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_, top_data); } else { ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data); } } template <typename Dtype> void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (bias_layer_ && this->param_propagate_down_[this->param_propagate_down_.size() - 1]) { bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_); } const bool scale_param = (bottom.size() == 1); Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1]; if ((!scale_param && propagate_down[1]) || (scale_param && this->param_propagate_down_[0])) { const Dtype* top_diff = top[0]->gpu_diff(); const bool in_place = (bottom[0] == top[0]); const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data(); // Hack: store big eltwise product in bottom[0] diff, except in the special // case where this layer itself does the eltwise product, in which case we // can store it directly in the scale diff, and we're done. // If we're computing in-place (and not doing eltwise computation), this // hack doesn't work and we store the product in temp_. const bool is_eltwise = (bottom[0]->count() == scale->count()); Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() : (in_place ? 
temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff())); caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); if (!is_eltwise) { Dtype* sum_result = NULL; if (inner_dim_ == 1) { sum_result = product; } else if (sum_result_.count() == 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; caffe_gpu_dot(inner_dim_, product, sum_mult, &result); *scale_diff += result; } else { caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff); } } else { const Dtype* sum_mult = sum_multiplier_.gpu_data(); sum_result = (outer_dim_ == 1) ? scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, Dtype(1), product, sum_mult, Dtype(0), sum_result); } if (outer_dim_ != 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); if (scale_dim_ == 1) { Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result); *scale_diff += result; } else { caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff); } } else { Dtype* scale_diff = scale->mutable_gpu_diff(); caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, Dtype(1), sum_result, sum_mult, Dtype(scale_param), scale_diff); } } } } if (propagate_down[0]) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* scale_data = scale->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer); } // namespace caffe
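// --------------------------------------------------------------------------
// A standalone sketch of the indexing pattern in ScaleForward above: a
// grid-stride kernel loop (what Caffe's CUDA_KERNEL_LOOP macro expands to)
// plus the (index / inner_dim) % scale_dim decomposition that maps a flat
// N*C*H*W index back to its channel. Sizes below are illustrative.
// --------------------------------------------------------------------------
#include <cstdio>

__global__ void scale_per_channel(const int n, const float *in,
                                  const float *scale, const int scale_dim,
                                  const int inner_dim, float *out)
{
    // Grid-stride loop: each thread handles indices i, i+stride, i+2*stride, ...
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
         index += blockDim.x * gridDim.x) {
        const int scale_index = (index / inner_dim) % scale_dim;   // channel id
        out[index] = in[index] * scale[scale_index];
    }
}

int main()
{
    const int num = 2, channels = 3, spatial = 8 * 8;   // N, C, H*W
    const int count = num * channels * spatial;
    float *in, *scale, *out;
    cudaMallocManaged(&in, count * sizeof(float));
    cudaMallocManaged(&scale, channels * sizeof(float));
    cudaMallocManaged(&out, count * sizeof(float));
    for (int i = 0; i < count; ++i) in[i] = 1.0f;
    for (int c = 0; c < channels; ++c) scale[c] = c + 1.0f;

    scale_per_channel<<<(count + 255) / 256, 256>>>(count, in, scale,
                                                    channels, spatial, out);
    cudaDeviceSynchronize();
    printf("out[0]=%f out[%d]=%f\n", out[0], spatial, out[spatial]);   // 1 and 2

    cudaFree(in); cudaFree(scale); cudaFree(out);
    return 0;
}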
15c2b80d47babb507f7613d956091c22dc3bdcca.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <hip/hip_runtime_api.h> //--profile-from-start off #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <hip/hip_runtime_api.h> //--profile-from-start off // #include "Dynamic/BFS/DynBFS.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <hip/hip_runtime_api.h> //--profile-from-start off #include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" #include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension #include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo #include <algorithm> //std:.generate #include <chrono> //std::chrono #include <random> //std::mt19937_64 #include <Core/Static/Static.cuh> #include <ctime> template <typename HornetGraph, typename BFS> int exec(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; using namespace graph::structure_prop; using namespace graph::parsing_prop; using namespace graph; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using BatchUpdate = hornet::gpu::BatchUpdate<vert_t>; hipDeviceSetLimit(hipLimitPrintfFifoSize, 1000000000000000000); // graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED ); // graph::GraphStd<vid_t, eoff_t> graph(DIRECTED); // graph::GraphStd<vid_t, eoff_t> graph(ENABLE_INGOING); graph::GraphStd<vid_t, eoff_t> graph; CommandLineParam cmd(graph, argc, argv,false); // ParsingProp pp; // graph.read(argv[1],pp); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); Timer<DEVICE> TM; HornetGraph hornet_graph(hornet_init); BFS bfs_top_down(hornet_graph); vid_t root = graph.max_out_degree_id(); // if (argc==3) // root = atoi(argv[2]); int numberRoots = 1; if (argc>=3) numberRoots = atoi(argv[2]); int alg = 0; if (argc>=4) alg = atoi(argv[3]); std::cout << "My root is " << root << std::endl; std::cout<<"prima size "<<hornet_graph.nE()<<"\n"; int batch_size = 10000; vert_t* batch_src = new vert_t[batch_size](); vert_t* batch_dst = new vert_t[batch_size](); generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT, batch_gen_property::UNIQUE); UpdatePtr ptr(batch_size, batch_src, batch_dst); BatchUpdate batch_update_src_to_dst(ptr); UpdatePtr ptr2(batch_size, batch_dst, batch_src); BatchUpdate batch_update_dst_to_src(ptr2); hornet_graph.insert(batch_update_src_to_dst); hornet_graph.insert(batch_update_dst_to_src); batch_update_src_to_dst.print(); std::cout<<"dopo size "<<hornet_graph.nE()<<"\n"; hipProfilerStart(); for(int i=0; i<numberRoots; i++){ bfs_top_down.reset(); bfs_top_down.set_parameters((root+i)%graph.nV(),alg); TM.start(); // bfs_top_down.run(); bfs_top_down.print_check(batch_src, batch_dst, batch_size); TM.stop(); std::cout << "Number of levels is : " << bfs_top_down.getLevels() << std::endl; } hipProfilerStop(); TM.print("TopDown2"); return 0; } int main(int argc, char* argv[]) { int ret = 0; hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set 
to 0 (default value), RMM currently assigns half the device memory. {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. ret = exec<hornets_nest::HornetDynamicGraph, hornets_nest::BfsTopDown2Dynamic>(argc, argv); // ret = exec<hornets_nest::HornetStaticGraph, hornets_nest::BfsTopDown2Static >(argc, argv); }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. hornets_nest::gpu::finalizeRMMPoolAllocation(); return ret; }
15c2b80d47babb507f7613d956091c22dc3bdcca.cu
/** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <cuda_profiler_api.h> //--profile-from-start off #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <cuda_profiler_api.h> //--profile-from-start off // #include "Dynamic/BFS/DynBFS.cuh" #include <StandardAPI.hpp> #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <cuda_profiler_api.h> //--profile-from-start off #include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" #include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension #include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo #include <algorithm> //std:.generate #include <chrono> //std::chrono #include <random> //std::mt19937_64 #include <Core/Static/Static.cuh> #include <ctime> template <typename HornetGraph, typename BFS> int exec(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; using namespace graph::structure_prop; using namespace graph::parsing_prop; using namespace graph; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using BatchUpdate = hornet::gpu::BatchUpdate<vert_t>; cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1000000000000000000); // graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED ); // graph::GraphStd<vid_t, eoff_t> graph(DIRECTED); // graph::GraphStd<vid_t, eoff_t> graph(ENABLE_INGOING); graph::GraphStd<vid_t, eoff_t> graph; CommandLineParam cmd(graph, argc, argv,false); // ParsingProp pp; // graph.read(argv[1],pp); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); Timer<DEVICE> TM; HornetGraph hornet_graph(hornet_init); BFS bfs_top_down(hornet_graph); vid_t root = graph.max_out_degree_id(); // if (argc==3) // root = atoi(argv[2]); int numberRoots = 1; if (argc>=3) numberRoots = atoi(argv[2]); int alg = 0; if (argc>=4) alg = atoi(argv[3]); std::cout << "My root is " << root << std::endl; std::cout<<"prima size "<<hornet_graph.nE()<<"\n"; int batch_size = 10000; vert_t* batch_src = new vert_t[batch_size](); vert_t* batch_dst = new vert_t[batch_size](); generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT, batch_gen_property::UNIQUE); UpdatePtr ptr(batch_size, batch_src, batch_dst); BatchUpdate batch_update_src_to_dst(ptr); UpdatePtr ptr2(batch_size, batch_dst, batch_src); BatchUpdate batch_update_dst_to_src(ptr2); hornet_graph.insert(batch_update_src_to_dst); hornet_graph.insert(batch_update_dst_to_src); batch_update_src_to_dst.print(); std::cout<<"dopo size "<<hornet_graph.nE()<<"\n"; cudaProfilerStart(); for(int i=0; i<numberRoots; i++){ bfs_top_down.reset(); bfs_top_down.set_parameters((root+i)%graph.nV(),alg); TM.start(); // bfs_top_down.run(); bfs_top_down.print_check(batch_src, batch_dst, batch_size); TM.stop(); std::cout << "Number of levels is : " << bfs_top_down.getLevels() << std::endl; } cudaProfilerStop(); TM.print("TopDown2"); return 0; } int main(int argc, char* argv[]) { int ret = 0; hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device 
memory. {//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. ret = exec<hornets_nest::HornetDynamicGraph, hornets_nest::BfsTopDown2Dynamic>(argc, argv); // ret = exec<hornets_nest::HornetStaticGraph, hornets_nest::BfsTopDown2Static >(argc, argv); }//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations. hornets_nest::gpu::finalizeRMMPoolAllocation(); return ret; }
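// --------------------------------------------------------------------------
// A hedged, host-only sketch of what the batch setup above amounts to:
// generate random (src, dst) edge pairs, drop duplicates, and keep the two
// flat arrays that are later wrapped in a batch-update pointer. This is not
// Hornet's generateBatch(); it only illustrates the idea, and the resulting
// batch may shrink below batch_size after deduplication.
// --------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>
#include <random>
#include <utility>
#include <vector>

int main()
{
    const int num_vertices = 1000;
    const int batch_size   = 16;

    std::mt19937_64 gen(0xBEEF);
    std::uniform_int_distribution<int> dist(0, num_vertices - 1);

    // Draw random edges, then sort + unique to mimic the UNIQUE batch property.
    std::vector<std::pair<int, int>> edges;
    while ((int)edges.size() < batch_size)
        edges.emplace_back(dist(gen), dist(gen));
    std::sort(edges.begin(), edges.end());
    edges.erase(std::unique(edges.begin(), edges.end()), edges.end());

    // Split into the flat src/dst arrays a batch-update pointer expects.
    std::vector<int> batch_src, batch_dst;
    for (auto &e : edges) { batch_src.push_back(e.first); batch_dst.push_back(e.second); }

    for (size_t i = 0; i < batch_src.size(); ++i)
        printf("edge %zu: %d -> %d\n", i, batch_src[i], batch_dst[i]);
    return 0;
}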
1614c72692d224c103c92dafd82dc85d736fa163.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* A multithreaded C-program for MT19937. Original single threaded C reference coded by Takuji Nishimurar and Makoto Matsumoto, with initialization improved 2002/1/26. Multithreaded C implementation coded by Eric Mills. Before using, initialize the state by using mt19937gi(seed) or mt19937gai(init_key, key_length) for the global memory versions or mt19937si(seed) or mt19937sai(init_key, key_length) for all shared memory versions. Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved. Multithreaded implementation Copyright (C) 2007, Eric Mills. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Any feedback is very welcome. http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space) */ #define NVG80 /* For Nvidia G80 achitecture where mod is VERY slow */ #ifdef NVG80 #define mod(x, y) ((x) < (y) ? (x) : (x) - (y)) /* Short mod - known input range */ #else #define mod(x, y) ((x) % (y)) #endif #ifdef _WIN32 typedef unsigned int uint; #endif #define N 624 #define M 397 #define INIT_MULT 1812433253 /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ #define ARRAY_SEED 19650218 /* Seed for initial setup before incorp array seed */ #define MATRIX_A 0x9908b0df /* Constant vector a */ #define UPPER_MASK 0x80000000 /* Most significant w-r bits */ #define LOWER_MASK 0x7fffffff /* Least significant r bits */ #define TEMPER1 0x9d2c5680 #define TEMPER2 0xefc60000 /* First a global memory implementation that uses 2 global reads and 1 global * write per result and keeps only 2 words of state in permanent shared memory. 
*/ #define MAX_THREADS 227 /* Set to minimise shared memory allocation (max blockDim.x) */ #define MAX_BLOCKS 256 /* Set to minimise global memory allocation (max gridDim.x) */ __shared__ int mtNext; /* Start of next block of seeds */ __shared__ uint mtNexti; /* Indirect on above to save global read cycle */ __device__ uint g_seeds[MAX_BLOCKS][N]; __constant__ uint mag01[2] = {0, MATRIX_A}; /* 2 way bus conflict for each read */ /* Init by single seed - single threaded as only used once */ __device__ static void mt19937gi(uint seed) { int i; mtNext = 0; if (threadIdx.x == 0) { g_seeds[blockIdx.x][0] = mtNexti = seed; for (i = 1; i < N; i++) { seed = (INIT_MULT * (seed ^ (seed >> 30)) + i); g_seeds[blockIdx.x][i] = seed; } } return; } /* Init by array - single threaded as only used once, opt to reduce global refs */ __device__ static void mt19937gai(uint* seeds, uint length) { mt19937gi(ARRAY_SEED); if (threadIdx.x == 0) { int i = 1; int j = 0; int k; uint mti; /* g_seeds[i] */ uint mtj; /* g_seeds[i - 1] */ mti = g_seeds[blockIdx.x][0]; for (k = N > length ? N : length; k != 0; k--) { mtj = mti; mti = g_seeds[blockIdx.x][i]; mti = (mti ^ ((mtj ^ (mtj >> 30)) * 1664525)) + seeds[j] + j; g_seeds[blockIdx.x][i] = mti; if (++i >= N) { g_seeds[blockIdx.x][0] = mti; i = 1; } if (++j >= length) { j = 0; } } for (k = N - 1; k != 0; k--) { mtj = mti; mti = g_seeds[blockIdx.x][i]; mti = (mti ^ ((mtj ^ (mtj >> 30)) * 1566083941)) - i; g_seeds[blockIdx.x][i] = mti; if (++i >= N) { g_seeds[blockIdx.x][0] = mti; i = 1; } } g_seeds[blockIdx.x][0] = mtNexti = 0x80000000; /* MSB is 1; assuring non-zero initial array */ } return; } /* Return next MT random by increasing thread ID, Good for 1 - 227 threads. * Note you should wind back MAX_THREADS to your max requirement * to keep auto allocation of shared mem to a minimum. * Best as a general purpose library routine. */ __device__ static uint mt19937g(void) { int kk; uint y; const int tid = threadIdx.x; __shared__ uint seed[MAX_THREADS + 1]; kk = mod(mtNext + tid, N); __syncthreads(); /* Finish with mtNext & g_seeds ready from last call & init */ seed[tid + 1] = g_seeds[blockIdx.x][mod(kk + 1, N)]; /* Sequential but not aligned */ if (tid == blockDim.x - 1) { mtNext = kk + 1; seed[0] = mtNexti; mtNexti = seed[blockDim.x]; } __syncthreads(); /* seed[] ready */ y = (seed[tid] & UPPER_MASK) | (seed[tid + 1] & LOWER_MASK); y = g_seeds[blockIdx.x][kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; g_seeds[blockIdx.x][kk] = y; /* Does not overlap above reads */ y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /* Generalised global memory version for any number of threads. * Note only runs up to 227 at a time, rest loop and block till all done. * Runs fractional warps at each end so not perfect utilisation. * Uses 228 words of auto allocated shared mem. 
*/ __device__ static uint mt19937gl(void) { int jj; int kk; uint y; int tid; /* Offset thread ID */ __shared__ uint seed[N - M + 1]; kk = mod(mtNext + threadIdx.x, N); /* G80 limited to 512 threads */ __syncthreads(); /* Finish with mtNext & g_seeds set from init */ if (threadIdx.x == blockDim.x - 1) { mtNext = kk + 1; /* Modded next call */ } jj = 0; do { __syncthreads(); /* g_seeds set from last loop */ tid = threadIdx.x - jj; if (0 <= tid && tid < N - M) { seed[tid + 1] = g_seeds[blockIdx.x][mod(kk + 1, N)]; /* Sequential but not aligned */ y = min(N - M, blockDim.x - jj); if (tid == y - 1) /* Last thread this loop */ { seed[0] = mtNexti; mtNexti = seed[y]; } } __syncthreads(); /* seed[] ready */ if (0 <= tid && tid < N - M) { y = (seed[tid] & UPPER_MASK) | (seed[tid + 1] & LOWER_MASK); y = g_seeds[blockIdx.x][kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; g_seeds[blockIdx.x][kk] = y; /* Does not overlap reads above */ } } while ((jj += N - M) < blockDim.x); y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /************************************************************************************* * This is a shared memory implementation that keeps the full 626 words of state * in shared memory. Faster for heavy random work where you can afford shared mem. */ __shared__ int mtNexts; /* Start of next block of seeds */ __shared__ uint s_seeds[N + 1]; /* Init by single seed - single threaded as only used once */ __device__ static void mt19937si(uint seed) { int i; if (threadIdx.x == 0) { mtNexts = 0; s_seeds[0] = seed; for (i = 1; i < N; i++) { seed = (INIT_MULT * (seed ^ (seed >> 30)) + i); s_seeds[i] = seed; } } __syncthreads(); /* Ensure mtNexts set & needed for mt19937w() */ return; } /* Init by array - single threaded as only used once */ __device__ static void mt19937sai(uint* seeds, uint length) { mt19937si(ARRAY_SEED); if (threadIdx.x == 0) { int i = 1; int j = 0; int k; for (k = N > length ? N : length; k != 0; k--) { s_seeds[i] = (s_seeds[i] ^ ((s_seeds[i - 1] ^ (s_seeds[i - 1] >> 30)) * 1664525)) + seeds[j] + j; if (++i >= N) { s_seeds[0] = s_seeds[N - 1]; i = 1; } if (++j >= length) { j = 0; } } for (k = N - 1; k != 0; k--) { s_seeds[i] = (s_seeds[i] ^ ((s_seeds[i - 1] ^ (s_seeds[i - 1] >> 30)) * 1566083941)) - i; if (++i >= N) { s_seeds[0] = s_seeds[N - 1]; i = 1; } } s_seeds[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */ } __syncthreads(); /* Needed for mt19937w() */ return; } /* Return next MT random by increasing thread ID for 1-227 threads. */ __device__ static uint mt19937s(void) { int kk; uint y; const int tid = threadIdx.x; kk = mod(mtNexts + tid, N); __syncthreads(); /* Finished with mtNexts & s_seed[] ready from last run */ if (tid == blockDim.x - 1) { mtNexts = kk + 1; /* Will get modded on next call */ } y = (s_seeds[kk] & UPPER_MASK) | (s_seeds[kk + 1] & LOWER_MASK); y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; //y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ (y & 1 ? MATRIX_A : 0); // Same speed __syncthreads(); /* All done before we update */ s_seeds[kk] = y; if (kk == 0) /* Copy up for next round */ { s_seeds[N] = y; } y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /* General shared memory version for any number of threads. * Note only up to 227 threads are run at any one time, * the rest loop and block till all are done. 
*/ __device__ static uint mt19937sl(void) { int jj; int kk; uint y; int tid; /* Offset thread ID */ kk = mod(mtNexts + threadIdx.x, N); /* G80 limited to 512 threads */ __syncthreads(); /* Finished with mtNexts & s_seed[] ready from init */ if (threadIdx.x == blockDim.x - 1) { mtNexts = kk + 1; /* Will get modded on next call */ } jj = 0; do { __syncthreads(); /* s_seeds[] ready from last loop */ tid = threadIdx.x - jj; if (0 <= tid && tid < N - M) { y = (s_seeds[kk] & UPPER_MASK) | (s_seeds[kk + 1] & LOWER_MASK); y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); /* All done before we update */ if (0 <= tid && tid < N - M) { s_seeds[kk] = y; if (kk == 0) { s_seeds[N] = y; } } } while ((jj += N - M) < blockDim.x); y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /*************************************************************************************** * This is an implementation of a full step in 1 call - all 624 results returned at once * in pairs - 64 bit version. It may be run with 227-312 threads and will drop numbers * from the sequence if < 312 (not incorrect). * Original idea for this version was first presented by Brian Budge. */ #define B2 224 /* Size of second block */ __device__ static uint2 mt19937w(const int tid) { int kk; uint y; uint2 ret; kk = tid; /* First 227 */ if (kk < N-M) { y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK); y = s_seeds[kk+M] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); if (kk < N-M) { s_seeds[kk] = y; if (kk == 0) { s_seeds[N] = y; } } kk += N-M; __syncthreads(); /* Next 224 */ if (kk < N-M + B2) { y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK); y = s_seeds[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); if (kk < N-M + B2) { s_seeds[kk] = y; } kk += B2; __syncthreads(); /* Last 173 */ if (kk < N) { y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK); y = s_seeds[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); if (kk < N) { s_seeds[kk] = y; } __syncthreads(); ret.x = s_seeds[2*tid]; ret.x ^= (ret.x >> 11); /* Tempering */ ret.x ^= (ret.x << 7) & TEMPER1; ret.x ^= (ret.x << 15) & TEMPER2; ret.x ^= (ret.x >> 18); ret.y = s_seeds[2*tid+1]; ret.y ^= (ret.y >> 11); ret.y ^= (ret.y << 7) & TEMPER1; ret.y ^= (ret.y << 15) & TEMPER2; ret.y ^= (ret.y >> 18); return ret; } /******************************************************************************* * For reference this is the original C single threaded source: */ #if 0 static unsigned long mt[N]; /* the array for the state vector */ static int mti=N+1; /* mti==N+1 means mt[N] is not initialized */ /* initializes mt[N] with a seed */ void init_genrand(unsigned long s) { mt[0]= s & 0xffffffffUL; for (mti=1; mti<N; mti++) { mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti); /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. */ /* 2002/01/09 modified by Makoto Matsumoto */ mt[mti] &= 0xffffffffUL; /* for >32 bit machines */ } } /* initialize by an array with array-length */ /* init_key is the array for initializing keys */ /* key_length is its length */ /* slight change for C++, 2004/2/26 */ void init_by_array(unsigned long init_key[], int key_length) { int i, j, k; init_genrand(19650218UL); i=1; j=0; k = (N>key_length ? 
N : key_length);
    for (; k; k--) {
        mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL))
          + init_key[j] + j; /* non linear */
        mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
        i++; j++;
        if (i>=N) { mt[0] = mt[N-1]; i=1; }
        if (j>=key_length) j=0;
    }
    for (k=N-1; k; k--) {
        mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL))
          - i; /* non linear */
        mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
        i++;
        if (i>=N) { mt[0] = mt[N-1]; i=1; }
    }

    mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
}

/* generates a random number on [0,0xffffffff]-interval */
unsigned long genrand_int32(void)
{
    unsigned long y;
    static unsigned long mag01[2]={0x0UL, MATRIX_A};
    /* mag01[x] = x * MATRIX_A  for x=0,1 */

    if (mti >= N) { /* generate N words at one time */
        int kk;

        if (mti == N+1)           /* if init_genrand() has not been called, */
            init_genrand(5489UL); /* a default initial seed is used */

        for (kk=0;kk<N-M;kk++) {
            y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
            mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        for (;kk<N-1;kk++) {
            y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
            mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
        mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

        mti = 0;
    }

    y = mt[mti++];

    /* Tempering */
    y ^= (y >> 11);
    y ^= (y << 7) & 0x9d2c5680UL;
    y ^= (y << 15) & 0xefc60000UL;
    y ^= (y >> 18);

    return y;
}
#endif
1614c72692d224c103c92dafd82dc85d736fa163.cu
/* A multithreaded C-program for MT19937.
   Original single threaded C reference coded by Takuji Nishimura and
   Makoto Matsumoto, with initialization improved 2002/1/26.
   Multithreaded C implementation coded by Eric Mills.

   Before using, initialize the state by using mt19937gi(seed) or
   mt19937gai(init_key, key_length) for the global memory versions or
   mt19937si(seed) or mt19937sai(init_key, key_length) for all shared
   memory versions.

   Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
   All rights reserved.
   Multithreaded implementation Copyright (C) 2007, Eric Mills.
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

     1. Redistributions of source code must retain the above copyright
        notice, this list of conditions and the following disclaimer.

     2. Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.

     3. The names of its contributors may not be used to endorse or promote
        products derived from this software without specific prior written
        permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   Any feedback is very welcome.
   http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
   email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
*/

#define NVG80                   /* For Nvidia G80 architecture where mod is VERY slow */

#ifdef NVG80
#define mod(x, y)   ((x) < (y) ? (x) : (x) - (y))   /* Short mod - known input range */
#else
#define mod(x, y)   ((x) % (y))
#endif

#ifdef _WIN32
typedef unsigned int    uint;
#endif

#define N           624
#define M           397
#define INIT_MULT   1812433253  /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
#define ARRAY_SEED  19650218    /* Seed for initial setup before incorp array seed */
#define MATRIX_A    0x9908b0df  /* Constant vector a */
#define UPPER_MASK  0x80000000  /* Most significant w-r bits */
#define LOWER_MASK  0x7fffffff  /* Least significant r bits */
#define TEMPER1     0x9d2c5680
#define TEMPER2     0xefc60000

/* First a global memory implementation that uses 2 global reads and 1 global
 * write per result and keeps only 2 words of state in permanent shared memory.
*/ #define MAX_THREADS 227 /* Set to minimise shared memory allocation (max blockDim.x) */ #define MAX_BLOCKS 256 /* Set to minimise global memory allocation (max gridDim.x) */ __shared__ int mtNext; /* Start of next block of seeds */ __shared__ uint mtNexti; /* Indirect on above to save global read cycle */ __device__ uint g_seeds[MAX_BLOCKS][N]; __constant__ uint mag01[2] = {0, MATRIX_A}; /* 2 way bus conflict for each read */ /* Init by single seed - single threaded as only used once */ __device__ static void mt19937gi(uint seed) { int i; mtNext = 0; if (threadIdx.x == 0) { g_seeds[blockIdx.x][0] = mtNexti = seed; for (i = 1; i < N; i++) { seed = (INIT_MULT * (seed ^ (seed >> 30)) + i); g_seeds[blockIdx.x][i] = seed; } } return; } /* Init by array - single threaded as only used once, opt to reduce global refs */ __device__ static void mt19937gai(uint* seeds, uint length) { mt19937gi(ARRAY_SEED); if (threadIdx.x == 0) { int i = 1; int j = 0; int k; uint mti; /* g_seeds[i] */ uint mtj; /* g_seeds[i - 1] */ mti = g_seeds[blockIdx.x][0]; for (k = N > length ? N : length; k != 0; k--) { mtj = mti; mti = g_seeds[blockIdx.x][i]; mti = (mti ^ ((mtj ^ (mtj >> 30)) * 1664525)) + seeds[j] + j; g_seeds[blockIdx.x][i] = mti; if (++i >= N) { g_seeds[blockIdx.x][0] = mti; i = 1; } if (++j >= length) { j = 0; } } for (k = N - 1; k != 0; k--) { mtj = mti; mti = g_seeds[blockIdx.x][i]; mti = (mti ^ ((mtj ^ (mtj >> 30)) * 1566083941)) - i; g_seeds[blockIdx.x][i] = mti; if (++i >= N) { g_seeds[blockIdx.x][0] = mti; i = 1; } } g_seeds[blockIdx.x][0] = mtNexti = 0x80000000; /* MSB is 1; assuring non-zero initial array */ } return; } /* Return next MT random by increasing thread ID, Good for 1 - 227 threads. * Note you should wind back MAX_THREADS to your max requirement * to keep auto allocation of shared mem to a minimum. * Best as a general purpose library routine. */ __device__ static uint mt19937g(void) { int kk; uint y; const int tid = threadIdx.x; __shared__ uint seed[MAX_THREADS + 1]; kk = mod(mtNext + tid, N); __syncthreads(); /* Finish with mtNext & g_seeds ready from last call & init */ seed[tid + 1] = g_seeds[blockIdx.x][mod(kk + 1, N)]; /* Sequential but not aligned */ if (tid == blockDim.x - 1) { mtNext = kk + 1; seed[0] = mtNexti; mtNexti = seed[blockDim.x]; } __syncthreads(); /* seed[] ready */ y = (seed[tid] & UPPER_MASK) | (seed[tid + 1] & LOWER_MASK); y = g_seeds[blockIdx.x][kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; g_seeds[blockIdx.x][kk] = y; /* Does not overlap above reads */ y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /* Generalised global memory version for any number of threads. * Note only runs up to 227 at a time, rest loop and block till all done. * Runs fractional warps at each end so not perfect utilisation. * Uses 228 words of auto allocated shared mem. 
*/ __device__ static uint mt19937gl(void) { int jj; int kk; uint y; int tid; /* Offset thread ID */ __shared__ uint seed[N - M + 1]; kk = mod(mtNext + threadIdx.x, N); /* G80 limited to 512 threads */ __syncthreads(); /* Finish with mtNext & g_seeds set from init */ if (threadIdx.x == blockDim.x - 1) { mtNext = kk + 1; /* Modded next call */ } jj = 0; do { __syncthreads(); /* g_seeds set from last loop */ tid = threadIdx.x - jj; if (0 <= tid && tid < N - M) { seed[tid + 1] = g_seeds[blockIdx.x][mod(kk + 1, N)]; /* Sequential but not aligned */ y = min(N - M, blockDim.x - jj); if (tid == y - 1) /* Last thread this loop */ { seed[0] = mtNexti; mtNexti = seed[y]; } } __syncthreads(); /* seed[] ready */ if (0 <= tid && tid < N - M) { y = (seed[tid] & UPPER_MASK) | (seed[tid + 1] & LOWER_MASK); y = g_seeds[blockIdx.x][kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; g_seeds[blockIdx.x][kk] = y; /* Does not overlap reads above */ } } while ((jj += N - M) < blockDim.x); y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /************************************************************************************* * This is a shared memory implementation that keeps the full 626 words of state * in shared memory. Faster for heavy random work where you can afford shared mem. */ __shared__ int mtNexts; /* Start of next block of seeds */ __shared__ uint s_seeds[N + 1]; /* Init by single seed - single threaded as only used once */ __device__ static void mt19937si(uint seed) { int i; if (threadIdx.x == 0) { mtNexts = 0; s_seeds[0] = seed; for (i = 1; i < N; i++) { seed = (INIT_MULT * (seed ^ (seed >> 30)) + i); s_seeds[i] = seed; } } __syncthreads(); /* Ensure mtNexts set & needed for mt19937w() */ return; } /* Init by array - single threaded as only used once */ __device__ static void mt19937sai(uint* seeds, uint length) { mt19937si(ARRAY_SEED); if (threadIdx.x == 0) { int i = 1; int j = 0; int k; for (k = N > length ? N : length; k != 0; k--) { s_seeds[i] = (s_seeds[i] ^ ((s_seeds[i - 1] ^ (s_seeds[i - 1] >> 30)) * 1664525)) + seeds[j] + j; if (++i >= N) { s_seeds[0] = s_seeds[N - 1]; i = 1; } if (++j >= length) { j = 0; } } for (k = N - 1; k != 0; k--) { s_seeds[i] = (s_seeds[i] ^ ((s_seeds[i - 1] ^ (s_seeds[i - 1] >> 30)) * 1566083941)) - i; if (++i >= N) { s_seeds[0] = s_seeds[N - 1]; i = 1; } } s_seeds[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */ } __syncthreads(); /* Needed for mt19937w() */ return; } /* Return next MT random by increasing thread ID for 1-227 threads. */ __device__ static uint mt19937s(void) { int kk; uint y; const int tid = threadIdx.x; kk = mod(mtNexts + tid, N); __syncthreads(); /* Finished with mtNexts & s_seed[] ready from last run */ if (tid == blockDim.x - 1) { mtNexts = kk + 1; /* Will get modded on next call */ } y = (s_seeds[kk] & UPPER_MASK) | (s_seeds[kk + 1] & LOWER_MASK); y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; //y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ (y & 1 ? MATRIX_A : 0); // Same speed __syncthreads(); /* All done before we update */ s_seeds[kk] = y; if (kk == 0) /* Copy up for next round */ { s_seeds[N] = y; } y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /* General shared memory version for any number of threads. * Note only up to 227 threads are run at any one time, * the rest loop and block till all are done. 
*/ __device__ static uint mt19937sl(void) { int jj; int kk; uint y; int tid; /* Offset thread ID */ kk = mod(mtNexts + threadIdx.x, N); /* G80 limited to 512 threads */ __syncthreads(); /* Finished with mtNexts & s_seed[] ready from init */ if (threadIdx.x == blockDim.x - 1) { mtNexts = kk + 1; /* Will get modded on next call */ } jj = 0; do { __syncthreads(); /* s_seeds[] ready from last loop */ tid = threadIdx.x - jj; if (0 <= tid && tid < N - M) { y = (s_seeds[kk] & UPPER_MASK) | (s_seeds[kk + 1] & LOWER_MASK); y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); /* All done before we update */ if (0 <= tid && tid < N - M) { s_seeds[kk] = y; if (kk == 0) { s_seeds[N] = y; } } } while ((jj += N - M) < blockDim.x); y ^= (y >> 11); /* Tempering */ y ^= (y << 7) & TEMPER1; y ^= (y << 15) & TEMPER2; y ^= (y >> 18); return y; } /*************************************************************************************** * This is an implementation of a full step in 1 call - all 624 results returned at once * in pairs - 64 bit version. It may be run with 227-312 threads and will drop numbers * from the sequence if < 312 (not incorrect). * Original idea for this version was first presented by Brian Budge. */ #define B2 224 /* Size of second block */ __device__ static uint2 mt19937w(const int tid) { int kk; uint y; uint2 ret; kk = tid; /* First 227 */ if (kk < N-M) { y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK); y = s_seeds[kk+M] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); if (kk < N-M) { s_seeds[kk] = y; if (kk == 0) { s_seeds[N] = y; } } kk += N-M; __syncthreads(); /* Next 224 */ if (kk < N-M + B2) { y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK); y = s_seeds[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); if (kk < N-M + B2) { s_seeds[kk] = y; } kk += B2; __syncthreads(); /* Last 173 */ if (kk < N) { y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK); y = s_seeds[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 1]; } __syncthreads(); if (kk < N) { s_seeds[kk] = y; } __syncthreads(); ret.x = s_seeds[2*tid]; ret.x ^= (ret.x >> 11); /* Tempering */ ret.x ^= (ret.x << 7) & TEMPER1; ret.x ^= (ret.x << 15) & TEMPER2; ret.x ^= (ret.x >> 18); ret.y = s_seeds[2*tid+1]; ret.y ^= (ret.y >> 11); ret.y ^= (ret.y << 7) & TEMPER1; ret.y ^= (ret.y << 15) & TEMPER2; ret.y ^= (ret.y >> 18); return ret; } /******************************************************************************* * For reference this is the original C single threaded source: */ #if 0 static unsigned long mt[N]; /* the array for the state vector */ static int mti=N+1; /* mti==N+1 means mt[N] is not initialized */ /* initializes mt[N] with a seed */ void init_genrand(unsigned long s) { mt[0]= s & 0xffffffffUL; for (mti=1; mti<N; mti++) { mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti); /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. */ /* 2002/01/09 modified by Makoto Matsumoto */ mt[mti] &= 0xffffffffUL; /* for >32 bit machines */ } } /* initialize by an array with array-length */ /* init_key is the array for initializing keys */ /* key_length is its length */ /* slight change for C++, 2004/2/26 */ void init_by_array(unsigned long init_key[], int key_length) { int i, j, k; init_genrand(19650218UL); i=1; j=0; k = (N>key_length ? 
N : key_length);
    for (; k; k--) {
        mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL))
          + init_key[j] + j; /* non linear */
        mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
        i++; j++;
        if (i>=N) { mt[0] = mt[N-1]; i=1; }
        if (j>=key_length) j=0;
    }
    for (k=N-1; k; k--) {
        mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL))
          - i; /* non linear */
        mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
        i++;
        if (i>=N) { mt[0] = mt[N-1]; i=1; }
    }

    mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
}

/* generates a random number on [0,0xffffffff]-interval */
unsigned long genrand_int32(void)
{
    unsigned long y;
    static unsigned long mag01[2]={0x0UL, MATRIX_A};
    /* mag01[x] = x * MATRIX_A  for x=0,1 */

    if (mti >= N) { /* generate N words at one time */
        int kk;

        if (mti == N+1)           /* if init_genrand() has not been called, */
            init_genrand(5489UL); /* a default initial seed is used */

        for (kk=0;kk<N-M;kk++) {
            y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
            mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        for (;kk<N-1;kk++) {
            y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
            mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
        }
        y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
        mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

        mti = 0;
    }

    y = mt[mti++];

    /* Tempering */
    y ^= (y >> 11);
    y ^= (y << 7) & 0x9d2c5680UL;
    y ^= (y << 15) & 0xefc60000UL;
    y ^= (y >> 18);

    return y;
}
#endif
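/* Editor's note - illustrative usage sketch only, not part of the file pair
 * above or below.  The header comment of this file says callers must
 * initialize the generator (e.g. mt19937si(seed)) before drawing values with
 * mt19937s(), which in turn requires every thread of the block to call it in
 * lock-step and blockDim.x <= 227.  The kernel below is one hypothetical way
 * to do that: the kernel name, output buffer and single-block launch are
 * assumptions, and it would have to live in the same translation unit as the
 * device functions above.
 */
__global__ void fill_random_example(uint* out, int n, uint seed)
{
    mt19937si(seed);                        /* Seed once; runs single-threaded inside, then syncs */

    int rounds = (n + blockDim.x - 1) / blockDim.x;
    for (int r = 0; r < rounds; r++) {
        uint y = mt19937s();                /* All threads call together - it uses __syncthreads() */
        int i = r * blockDim.x + threadIdx.x;
        if (i < n)                          /* Guard only the store, never the generator call */
            out[i] = y;
    }
}

/* Example launch (single block, 227 threads, so the indexing above covers
 * the whole buffer without overlap):
 *     fill_random_example<<<1, 227>>>(d_out, n, 12345u);
 */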
e825d8ca0284a8ecf4b3b0685545a988252017da.hip
// !!! This is a file automatically generated by hipify!!! // Copyright Contributors to the Open Shading Language project. // SPDX-License-Identifier: BSD-3-Clause // https://github.com/AcademySoftwareFoundation/OpenShadingLanguage #include <optix.h> #include <optix_device.h> #if (OPTIX_VERSION < 70000) #include <optix_math.h> #else #define OPTIX_COMPATIBILITY 7 #include <optix_device.h> #include <hip/hip_runtime.h> #endif #include <OSL/oslclosure.h> #include "rend_lib.h" #if (OPTIX_VERSION < 70000) rtDeclareVariable (uint2, launch_index, rtLaunchIndex, ); rtDeclareVariable (uint2, launch_dim, rtLaunchDim, ); rtDeclareVariable (char*, test_str_1, , ); rtDeclareVariable (char*, test_str_2, , ); OSL_NAMESPACE_ENTER namespace pvt { rtBuffer<char,1> s_color_system; } OSL_NAMESPACE_EXIT // Taken from the SimplePool class __device__ static inline size_t alignment_offset_calc(void* ptr, size_t alignment) { uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr); uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits; return offset; } // These functions are declared extern to prevent name mangling. extern "C" { __device__ void* closure_component_allot (void* pool, int id, size_t prim_size, const OSL::Color3& w) { ((OSL::ClosureComponent*) pool)->id = id; ((OSL::ClosureComponent*) pool)->w = w; size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return (void*) &char_ptr[needed]; } __device__ void* closure_mul_allot (void* pool, const OSL::Color3& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_mul_float_allot (void* pool, const float& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight.x = w; ((OSL::ClosureMul*) pool)->weight.y = w; ((OSL::ClosureMul*) pool)->weight.z = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_add_allot (void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b) { ((OSL::ClosureAdd*) pool)->id = OSL::ClosureColor::ADD; ((OSL::ClosureAdd*) pool)->closureA = a; ((OSL::ClosureAdd*) pool)->closureB = b; size_t needed = (sizeof(OSL::ClosureAdd) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* osl_allocate_closure_component (void* sg_, int id, int size) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; OSL::Color3 w = OSL::Color3 (1, 1, 1); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); size = max (4, size); sg_ptr->renderstate = closure_component_allot (ret, id, size, w); return ret; } __device__ void* osl_allocate_weighted_closure_component (void* sg_, int id, int size, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } size = max (4, size); // Fix up the alignment void* ret = 
((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_component_allot (ret, id, size, *w); return ret; } __device__ void* osl_mul_closure_color (void* sg_, OSL::ClosureColor* a, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL) { return NULL; } if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_allot (ret, *w, a); return ret; } __device__ void* osl_mul_closure_float (void* sg_, OSL::ClosureColor* a, float w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL || w == 0.0f) { return NULL; } if (w == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_float_allot (ret, w, a); return ret; } __device__ void* osl_add_closure_closure (void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL) { return b; } if (b == NULL) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_add_allot (ret, a, b); return ret; } #define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING) #define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR) #define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR) __device__ bool rend_get_userdata (OSL::StringParam name, void* data, int data_size, const OSL::TypeDesc& type, int index) { // Perform a userdata lookup using the parameter name, type, and // userdata index. If there is a match, memcpy the value into data and // return 1. if (IS_PTR(type) && name == STRING_PARAMS(colorsystem)) { *(void**)data = &OSL::pvt::s_color_system[0]; return true; } // TODO: This is temporary code for initial testing and demonstration. if (IS_STRING(type) && name == HDSTR(test_str_1)) { memcpy (data, &test_str_2, 8); return true; } return false; } #undef IS_COLOR #undef IS_STRING #undef IS_PTR __device__ int osl_bind_interpolated_param (void *sg_, const void *name, long long type, int userdata_has_derivs, void *userdata_data, int symbol_has_derivs, void *symbol_data, int symbol_data_size, char *userdata_initialized, int userdata_index) { char status = *userdata_initialized; if (status == 0) { bool ok = rend_get_userdata (HDSTR(name), userdata_data, symbol_data_size, (*(OSL::TypeDesc*)&type), userdata_index); *userdata_initialized = status = 1 + ok; } if (status == 2) { memcpy (symbol_data, userdata_data, symbol_data_size ); return 1; } return 0; } __device__ int osl_strlen_is (const char *str) { return HDSTR(str).length(); } __device__ int osl_hash_is (const char *str) { return HDSTR(str).hash(); } __device__ int osl_getchar_isi (const char *str, int index) { return (str && unsigned(index) < HDSTR(str).length()) ? 
str[index] : 0; } __device__ void osl_printf (void* sg_, char* fmt_str, void* args) { // This can be used to limit printing to one Cuda thread for debugging // if (launch_index.x == 0 && launch_index.y == 0) // vprintf(fmt_str, (const char*) args); } __device__ void* osl_get_noise_options (void *sg_) { ShaderGlobals* sg = ((ShaderGlobals*)sg_); NoiseOptCUDA* opt = (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr(); new (opt) NoiseOptCUDA; return opt; } __device__ void* osl_get_texture_options (void *sg_) { return 0; } __device__ void osl_texture_set_interp_code(void *opt, int mode) { // ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode; } __device__ void osl_texture_set_stwrap_code (void *opt, int mode) { //((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode; //((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode; } __device__ int osl_texture (void *sg_, const char *name, void *handle, void *opt_, float s, float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans, void *result, void *dresultdx, void *dresultdy, void *alpha, void *dalphadx, void *dalphady, void *ustring_errormessage) { if (!handle) return 0; int64_t texID = int64_t(handle); *((float3*)result) = make_float3(optix::rtTex2D<float4>(texID, s, t)); return 1; } __device__ int osl_range_check_err (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { return indexvalue < 0 ? 0 : length-1; } return indexvalue; } __device__ int osl_range_check (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { indexvalue = osl_range_check_err (indexvalue, length, symname, sg, sourcefile, sourceline, groupname, layer, layername, shadername); } return indexvalue; } #define MAT(m) (*(OSL::Matrix44 *)m) __device__ int osl_get_matrix (void *sg_, void *r, const char *from) { ShaderGlobals *sg = (ShaderGlobals *)sg_; //ShadingContext *ctx = (ShadingContext *)sg->context; if (HDSTR(from) == STRING_PARAMS(common) || //HDSTR(from) == ctx->shadingsys().commonspace_synonym() || HDSTR(from) == STRING_PARAMS(shader)) { MAT(r).makeIdentity (); return true; } if (HDSTR(from) == STRING_PARAMS(object)) { // TODO: Implement transform return false; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } __device__ int osl_get_inverse_matrix (void *sg_, void *r, const char *to) { ShaderGlobals *sg = (ShaderGlobals *)sg_; //ShadingContext *ctx = (ShadingContext *)sg->context; if (HDSTR(to) == STRING_PARAMS(common) || //HDSTR(to) == ctx->shadingsys().commonspace_synonym() || HDSTR(to) == STRING_PARAMS(shader)) { MAT(r).makeIdentity (); return true; } if (HDSTR(to) == STRING_PARAMS(object)) { // TODO: Implement transform return false; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? 
} return ok; } #undef MAT } #else //#if (OPTIX_VERSION < 70000) OSL_NAMESPACE_ENTER namespace pvt { __device__ hipDeviceptr_t s_color_system = 0; __device__ hipDeviceptr_t osl_printf_buffer_start = 0; __device__ hipDeviceptr_t osl_printf_buffer_end = 0; __device__ uint64_t test_str_1 = 0; __device__ uint64_t test_str_2 = 0; } OSL_NAMESPACE_EXIT // Taken from the SimplePool class __device__ static inline size_t alignment_offset_calc (void* ptr, size_t alignment) { uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr); uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits; return offset; } // These functions are declared extern to prevent name mangling. extern "C" { // add OptiX entry point to prevent OptiX from discarding the module __global__ void __direct_callable__dummy_rend_lib() { } __device__ void* closure_component_allot (void* pool, int id, size_t prim_size, const OSL::Color3& w) { ((OSL::ClosureComponent*) pool)->id = id; ((OSL::ClosureComponent*) pool)->w = w; size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return (void*) &char_ptr[needed]; } __device__ void* closure_mul_allot (void* pool, const OSL::Color3& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_mul_float_allot (void* pool, const float& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight.x = w; ((OSL::ClosureMul*) pool)->weight.y = w; ((OSL::ClosureMul*) pool)->weight.z = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_add_allot (void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b) { ((OSL::ClosureAdd*) pool)->id = OSL::ClosureColor::ADD; ((OSL::ClosureAdd*) pool)->closureA = a; ((OSL::ClosureAdd*) pool)->closureB = b; size_t needed = (sizeof(OSL::ClosureAdd) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* osl_allocate_closure_component (void* sg_, int id, int size) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; OSL::Color3 w = OSL::Color3 (1, 1, 1); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); size = max (4, size); sg_ptr->renderstate = closure_component_allot (ret, id, size, w); return ret; } __device__ void* osl_allocate_weighted_closure_component (void* sg_, int id, int size, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } size = max (4, size); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_component_allot (ret, id, size, *w); return ret; } __device__ void* osl_mul_closure_color (void* sg_, OSL::ClosureColor* a, const OSL::Color3* w) { ShaderGlobals* sg_ptr = 
(ShaderGlobals*) sg_; if (a == NULL) { return NULL; } if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_allot (ret, *w, a); return ret; } __device__ void* osl_mul_closure_float (void* sg_, OSL::ClosureColor* a, float w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL || w == 0.0f) { return NULL; } if (w == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_float_allot (ret, w, a); return ret; } __device__ void* osl_add_closure_closure (void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL) { return b; } if (b == NULL) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_add_allot (ret, a, b); return ret; } #define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING) #define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR) #define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR) __device__ bool rend_get_userdata (OSL::StringParam name, void* data, int data_size, const OSL::TypeDesc& type, int index) { // Perform a userdata lookup using the parameter name, type, and // userdata index. If there is a match, memcpy the value into data and // return 1. if (IS_PTR(type) && name.hash() == STRING_PARAMS(colorsystem)) { *(void**)data = *reinterpret_cast<void **>(&OSL::pvt::s_color_system); return true; } // TODO: This is temporary code for initial testing and demonstration. if (IS_STRING(type) && name == HDSTR(OSL::pvt::test_str_1)) { memcpy (data, &OSL::pvt::test_str_2, 8); return true; } return false; } #undef IS_COLOR #undef IS_STRING #undef IS_PTR __device__ int osl_bind_interpolated_param (void *sg_, const void *name, long long type, int userdata_has_derivs, void *userdata_data, int symbol_has_derivs, void *symbol_data, int symbol_data_size, char *userdata_initialized, int userdata_index) { char status = *userdata_initialized; if (status == 0) { bool ok = rend_get_userdata (HDSTR(name), userdata_data, symbol_data_size, (*(OSL::TypeDesc*)&type), userdata_index); *userdata_initialized = status = 1 + ok; } if (status == 2) { memcpy (symbol_data, userdata_data, symbol_data_size ); return 1; } return 0; } __device__ int osl_strlen_is (const char *str) { //return HDSTR(str).length(); return 0; } __device__ int osl_hash_is (const char *str) { return HDSTR(str); } __device__ int osl_getchar_isi (const char *str, int index) { // return (str && unsigned(index) < HDSTR(str).length()) // ? str[index] : 0; return 0; } // Printing is handled by the host. Copy format string's hash and // all the arguments to our print buffer. 
// Note: the first element of 'args' is the size of the argument list __device__ void osl_printf (void* sg_, char* fmt_str, void* args) { uint64_t fmt_str_hash = HDSTR(fmt_str).hash(); uint64_t args_size = reinterpret_cast<uint64_t*>(args)[0]; // This can be used to limit printing to one Cuda thread for debugging // if (launch_index.x == 0 && launch_index.y == 0) hipDeviceptr_t copy_start = atomicAdd(&OSL::pvt::osl_printf_buffer_start, args_size + sizeof(args_size) + sizeof(fmt_str_hash)); // Only perform copy if there's enough space if (copy_start + args_size + sizeof(args_size) + sizeof(fmt_str_hash) < OSL::pvt::osl_printf_buffer_end) { memcpy(reinterpret_cast<void *>(copy_start), &fmt_str_hash, sizeof(fmt_str_hash) ); memcpy(reinterpret_cast<void *>(copy_start + sizeof(fmt_str_hash)), &args_size, sizeof(args_size) ); memcpy(reinterpret_cast<void *>(copy_start + sizeof(fmt_str_hash) + sizeof(args_size)), reinterpret_cast<char *>(args) + sizeof(args_size), args_size); } } __device__ void* osl_get_noise_options (void *sg_) { ShaderGlobals* sg = ((ShaderGlobals*)sg_); NoiseOptCUDA* opt = (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr(); new (opt) NoiseOptCUDA; return opt; } __device__ void* osl_get_texture_options (void *sg_) { return 0; } __device__ void osl_texture_set_interp_code(void *opt, int mode) { // ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode; } __device__ void osl_texture_set_stwrap_code (void *opt, int mode) { //((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode; //((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode; } __forceinline__ __device__ float3 make_float3(const float4& a) { return make_float3(a.x, a.y, a.z); } // FIXME: // clang++ 9.0 seems to have trouble with tex2d<float4>() look-ups, // so we'll declare this external and implement texture lookups in // CUDA files compiled by nvcc (optix_grid_renderer.cu and // optix_raytrace.cu). // (clang++ 9.0 error 'undefined __nv_tex_surf_handler') extern __device__ float4 osl_tex2DLookup(void *handle, float s, float t); __device__ int osl_texture (void *sg_, const char *name, void *handle, void *opt_, float s, float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans, void *result, void *dresultdx, void *dresultdy, void *alpha, void *dalphadx, void *dalphady, void *ustring_errormessage) { if (!handle) return 0; hipTextureObject_t texID = hipTextureObject_t(handle); float4 fromTexture = osl_tex2DLookup(handle, s, t); // see note above // float4 fromTexture = tex2D<float4>(texID, s, t); *((float3*)result) = make_float3(fromTexture.x, fromTexture.y, fromTexture.z); return 1; } __device__ int osl_range_check_err (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { return indexvalue < 0 ? 
0 : length-1;
    }
    return indexvalue;
}

__device__
int osl_range_check (int indexvalue, int length, const char *symname,
                     void *sg, const void *sourcefile, int sourceline,
                     const char *groupname, int layer, const char *layername,
                     const char *shadername)
{
    if (indexvalue < 0 || indexvalue >= length) {
        indexvalue = osl_range_check_err (indexvalue, length, symname, sg,
                                          sourcefile, sourceline, groupname,
                                          layer, layername, shadername);
    }
    return indexvalue;
}

#define MAT(m) (*(OSL::Matrix44 *)m)

__device__
int osl_get_matrix (void *sg_, void *r, const char *from)
{
    ShaderGlobals *sg = (ShaderGlobals *)sg_;
    //ShadingContext *ctx = (ShadingContext *)sg->context;

    if (HDSTR(from) == STRING_PARAMS(common) ||
        //HDSTR(from) == ctx->shadingsys().commonspace_synonym() ||
        HDSTR(from) == STRING_PARAMS(shader)) {
        MAT(r).makeIdentity ();
        return true;
    }
    if (HDSTR(from) == STRING_PARAMS(object)) {
        // TODO: Implement transform
        return false;
    }

    int ok = false;  // TODO: Implement transform
    if (!ok) {
        MAT(r).makeIdentity();
        // TBR: OSL would throw an error here, what should we do?
    }
    return ok;
}

__device__
int osl_get_inverse_matrix (void *sg_, void *r, const char *to)
{
    ShaderGlobals *sg = (ShaderGlobals *)sg_;

    if (HDSTR(to) == STRING_PARAMS(common) ||
        //HDSTR(to) == ctx->shadingsys().commonspace_synonym() ||
        HDSTR(to) == STRING_PARAMS(shader)) {
        MAT(r).makeIdentity ();
        return true;
    }
    if (HDSTR(to) == STRING_PARAMS(object)) {
        // TODO: Implement transform
        return false;
    }

    int ok = false;  // TODO: Implement transform
    if (!ok) {
        MAT(r).makeIdentity();
        // TBR: OSL would throw an error here, what should we do?
    }
    return ok;
}
#undef MAT

}

#endif //#if (OPTIX_VERSION < 70000)
e825d8ca0284a8ecf4b3b0685545a988252017da.cu
// Copyright Contributors to the Open Shading Language project. // SPDX-License-Identifier: BSD-3-Clause // https://github.com/AcademySoftwareFoundation/OpenShadingLanguage #include <optix.h> #include <optix_device.h> #if (OPTIX_VERSION < 70000) #include <optix_math.h> #else #define OPTIX_COMPATIBILITY 7 #include <optix_device.h> #include <cuda_runtime.h> #endif #include <OSL/oslclosure.h> #include "rend_lib.h" #if (OPTIX_VERSION < 70000) rtDeclareVariable (uint2, launch_index, rtLaunchIndex, ); rtDeclareVariable (uint2, launch_dim, rtLaunchDim, ); rtDeclareVariable (char*, test_str_1, , ); rtDeclareVariable (char*, test_str_2, , ); OSL_NAMESPACE_ENTER namespace pvt { rtBuffer<char,1> s_color_system; } OSL_NAMESPACE_EXIT // Taken from the SimplePool class __device__ static inline size_t alignment_offset_calc(void* ptr, size_t alignment) { uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr); uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits; return offset; } // These functions are declared extern to prevent name mangling. extern "C" { __device__ void* closure_component_allot (void* pool, int id, size_t prim_size, const OSL::Color3& w) { ((OSL::ClosureComponent*) pool)->id = id; ((OSL::ClosureComponent*) pool)->w = w; size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return (void*) &char_ptr[needed]; } __device__ void* closure_mul_allot (void* pool, const OSL::Color3& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_mul_float_allot (void* pool, const float& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight.x = w; ((OSL::ClosureMul*) pool)->weight.y = w; ((OSL::ClosureMul*) pool)->weight.z = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_add_allot (void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b) { ((OSL::ClosureAdd*) pool)->id = OSL::ClosureColor::ADD; ((OSL::ClosureAdd*) pool)->closureA = a; ((OSL::ClosureAdd*) pool)->closureB = b; size_t needed = (sizeof(OSL::ClosureAdd) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* osl_allocate_closure_component (void* sg_, int id, int size) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; OSL::Color3 w = OSL::Color3 (1, 1, 1); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); size = max (4, size); sg_ptr->renderstate = closure_component_allot (ret, id, size, w); return ret; } __device__ void* osl_allocate_weighted_closure_component (void* sg_, int id, int size, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } size = max (4, size); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc 
(sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_component_allot (ret, id, size, *w); return ret; } __device__ void* osl_mul_closure_color (void* sg_, OSL::ClosureColor* a, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL) { return NULL; } if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_allot (ret, *w, a); return ret; } __device__ void* osl_mul_closure_float (void* sg_, OSL::ClosureColor* a, float w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL || w == 0.0f) { return NULL; } if (w == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_float_allot (ret, w, a); return ret; } __device__ void* osl_add_closure_closure (void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL) { return b; } if (b == NULL) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_add_allot (ret, a, b); return ret; } #define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING) #define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR) #define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR) __device__ bool rend_get_userdata (OSL::StringParam name, void* data, int data_size, const OSL::TypeDesc& type, int index) { // Perform a userdata lookup using the parameter name, type, and // userdata index. If there is a match, memcpy the value into data and // return 1. if (IS_PTR(type) && name == STRING_PARAMS(colorsystem)) { *(void**)data = &OSL::pvt::s_color_system[0]; return true; } // TODO: This is temporary code for initial testing and demonstration. if (IS_STRING(type) && name == HDSTR(test_str_1)) { memcpy (data, &test_str_2, 8); return true; } return false; } #undef IS_COLOR #undef IS_STRING #undef IS_PTR __device__ int osl_bind_interpolated_param (void *sg_, const void *name, long long type, int userdata_has_derivs, void *userdata_data, int symbol_has_derivs, void *symbol_data, int symbol_data_size, char *userdata_initialized, int userdata_index) { char status = *userdata_initialized; if (status == 0) { bool ok = rend_get_userdata (HDSTR(name), userdata_data, symbol_data_size, (*(OSL::TypeDesc*)&type), userdata_index); *userdata_initialized = status = 1 + ok; } if (status == 2) { memcpy (symbol_data, userdata_data, symbol_data_size ); return 1; } return 0; } __device__ int osl_strlen_is (const char *str) { return HDSTR(str).length(); } __device__ int osl_hash_is (const char *str) { return HDSTR(str).hash(); } __device__ int osl_getchar_isi (const char *str, int index) { return (str && unsigned(index) < HDSTR(str).length()) ? 
str[index] : 0; } __device__ void osl_printf (void* sg_, char* fmt_str, void* args) { // This can be used to limit printing to one Cuda thread for debugging // if (launch_index.x == 0 && launch_index.y == 0) // vprintf(fmt_str, (const char*) args); } __device__ void* osl_get_noise_options (void *sg_) { ShaderGlobals* sg = ((ShaderGlobals*)sg_); NoiseOptCUDA* opt = (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr(); new (opt) NoiseOptCUDA; return opt; } __device__ void* osl_get_texture_options (void *sg_) { return 0; } __device__ void osl_texture_set_interp_code(void *opt, int mode) { // ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode; } __device__ void osl_texture_set_stwrap_code (void *opt, int mode) { //((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode; //((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode; } __device__ int osl_texture (void *sg_, const char *name, void *handle, void *opt_, float s, float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans, void *result, void *dresultdx, void *dresultdy, void *alpha, void *dalphadx, void *dalphady, void *ustring_errormessage) { if (!handle) return 0; int64_t texID = int64_t(handle); *((float3*)result) = make_float3(optix::rtTex2D<float4>(texID, s, t)); return 1; } __device__ int osl_range_check_err (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { return indexvalue < 0 ? 0 : length-1; } return indexvalue; } __device__ int osl_range_check (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { indexvalue = osl_range_check_err (indexvalue, length, symname, sg, sourcefile, sourceline, groupname, layer, layername, shadername); } return indexvalue; } #define MAT(m) (*(OSL::Matrix44 *)m) __device__ int osl_get_matrix (void *sg_, void *r, const char *from) { ShaderGlobals *sg = (ShaderGlobals *)sg_; //ShadingContext *ctx = (ShadingContext *)sg->context; if (HDSTR(from) == STRING_PARAMS(common) || //HDSTR(from) == ctx->shadingsys().commonspace_synonym() || HDSTR(from) == STRING_PARAMS(shader)) { MAT(r).makeIdentity (); return true; } if (HDSTR(from) == STRING_PARAMS(object)) { // TODO: Implement transform return false; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } __device__ int osl_get_inverse_matrix (void *sg_, void *r, const char *to) { ShaderGlobals *sg = (ShaderGlobals *)sg_; //ShadingContext *ctx = (ShadingContext *)sg->context; if (HDSTR(to) == STRING_PARAMS(common) || //HDSTR(to) == ctx->shadingsys().commonspace_synonym() || HDSTR(to) == STRING_PARAMS(shader)) { MAT(r).makeIdentity (); return true; } if (HDSTR(to) == STRING_PARAMS(object)) { // TODO: Implement transform return false; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? 
} return ok; } #undef MAT } #else //#if (OPTIX_VERSION < 70000) OSL_NAMESPACE_ENTER namespace pvt { __device__ CUdeviceptr s_color_system = 0; __device__ CUdeviceptr osl_printf_buffer_start = 0; __device__ CUdeviceptr osl_printf_buffer_end = 0; __device__ uint64_t test_str_1 = 0; __device__ uint64_t test_str_2 = 0; } OSL_NAMESPACE_EXIT // Taken from the SimplePool class __device__ static inline size_t alignment_offset_calc (void* ptr, size_t alignment) { uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr); uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits; return offset; } // These functions are declared extern to prevent name mangling. extern "C" { // add OptiX entry point to prevent OptiX from discarding the module __global__ void __direct_callable__dummy_rend_lib() { } __device__ void* closure_component_allot (void* pool, int id, size_t prim_size, const OSL::Color3& w) { ((OSL::ClosureComponent*) pool)->id = id; ((OSL::ClosureComponent*) pool)->w = w; size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return (void*) &char_ptr[needed]; } __device__ void* closure_mul_allot (void* pool, const OSL::Color3& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_mul_float_allot (void* pool, const float& w, OSL::ClosureColor* c) { ((OSL::ClosureMul*) pool)->id = OSL::ClosureColor::MUL; ((OSL::ClosureMul*) pool)->weight.x = w; ((OSL::ClosureMul*) pool)->weight.y = w; ((OSL::ClosureMul*) pool)->weight.z = w; ((OSL::ClosureMul*) pool)->closure = c; size_t needed = (sizeof(OSL::ClosureMul) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* closure_add_allot (void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b) { ((OSL::ClosureAdd*) pool)->id = OSL::ClosureColor::ADD; ((OSL::ClosureAdd*) pool)->closureA = a; ((OSL::ClosureAdd*) pool)->closureB = b; size_t needed = (sizeof(OSL::ClosureAdd) + (alignof(OSL::ClosureComponent) - 1)) & ~(alignof(OSL::ClosureComponent) - 1); char* char_ptr = (char*) pool; return &char_ptr[needed]; } __device__ void* osl_allocate_closure_component (void* sg_, int id, int size) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; OSL::Color3 w = OSL::Color3 (1, 1, 1); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); size = max (4, size); sg_ptr->renderstate = closure_component_allot (ret, id, size, w); return ret; } __device__ void* osl_allocate_weighted_closure_component (void* sg_, int id, int size, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } size = max (4, size); // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_component_allot (ret, id, size, *w); return ret; } __device__ void* osl_mul_closure_color (void* sg_, OSL::ClosureColor* a, const OSL::Color3* w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) 
sg_; if (a == NULL) { return NULL; } if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) { return NULL; } if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_allot (ret, *w, a); return ret; } __device__ void* osl_mul_closure_float (void* sg_, OSL::ClosureColor* a, float w) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL || w == 0.0f) { return NULL; } if (w == 1.0f) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_mul_float_allot (ret, w, a); return ret; } __device__ void* osl_add_closure_closure (void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b) { ShaderGlobals* sg_ptr = (ShaderGlobals*) sg_; if (a == NULL) { return b; } if (b == NULL) { return a; } // Fix up the alignment void* ret = ((char*) sg_ptr->renderstate) + alignment_offset_calc (sg_ptr->renderstate, alignof(OSL::ClosureComponent)); sg_ptr->renderstate = closure_add_allot (ret, a, b); return ret; } #define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING) #define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR) #define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR) __device__ bool rend_get_userdata (OSL::StringParam name, void* data, int data_size, const OSL::TypeDesc& type, int index) { // Perform a userdata lookup using the parameter name, type, and // userdata index. If there is a match, memcpy the value into data and // return 1. if (IS_PTR(type) && name.hash() == STRING_PARAMS(colorsystem)) { *(void**)data = *reinterpret_cast<void **>(&OSL::pvt::s_color_system); return true; } // TODO: This is temporary code for initial testing and demonstration. if (IS_STRING(type) && name == HDSTR(OSL::pvt::test_str_1)) { memcpy (data, &OSL::pvt::test_str_2, 8); return true; } return false; } #undef IS_COLOR #undef IS_STRING #undef IS_PTR __device__ int osl_bind_interpolated_param (void *sg_, const void *name, long long type, int userdata_has_derivs, void *userdata_data, int symbol_has_derivs, void *symbol_data, int symbol_data_size, char *userdata_initialized, int userdata_index) { char status = *userdata_initialized; if (status == 0) { bool ok = rend_get_userdata (HDSTR(name), userdata_data, symbol_data_size, (*(OSL::TypeDesc*)&type), userdata_index); *userdata_initialized = status = 1 + ok; } if (status == 2) { memcpy (symbol_data, userdata_data, symbol_data_size ); return 1; } return 0; } __device__ int osl_strlen_is (const char *str) { //return HDSTR(str).length(); return 0; } __device__ int osl_hash_is (const char *str) { return HDSTR(str); } __device__ int osl_getchar_isi (const char *str, int index) { // return (str && unsigned(index) < HDSTR(str).length()) // ? str[index] : 0; return 0; } // Printing is handled by the host. Copy format string's hash and // all the arguments to our print buffer. 
// Note: the first element of 'args' is the size of the argument list __device__ void osl_printf (void* sg_, char* fmt_str, void* args) { uint64_t fmt_str_hash = HDSTR(fmt_str).hash(); uint64_t args_size = reinterpret_cast<uint64_t*>(args)[0]; // This can be used to limit printing to one Cuda thread for debugging // if (launch_index.x == 0 && launch_index.y == 0) CUdeviceptr copy_start = atomicAdd(&OSL::pvt::osl_printf_buffer_start, args_size + sizeof(args_size) + sizeof(fmt_str_hash)); // Only perform copy if there's enough space if (copy_start + args_size + sizeof(args_size) + sizeof(fmt_str_hash) < OSL::pvt::osl_printf_buffer_end) { memcpy(reinterpret_cast<void *>(copy_start), &fmt_str_hash, sizeof(fmt_str_hash) ); memcpy(reinterpret_cast<void *>(copy_start + sizeof(fmt_str_hash)), &args_size, sizeof(args_size) ); memcpy(reinterpret_cast<void *>(copy_start + sizeof(fmt_str_hash) + sizeof(args_size)), reinterpret_cast<char *>(args) + sizeof(args_size), args_size); } } __device__ void* osl_get_noise_options (void *sg_) { ShaderGlobals* sg = ((ShaderGlobals*)sg_); NoiseOptCUDA* opt = (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr(); new (opt) NoiseOptCUDA; return opt; } __device__ void* osl_get_texture_options (void *sg_) { return 0; } __device__ void osl_texture_set_interp_code(void *opt, int mode) { // ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode; } __device__ void osl_texture_set_stwrap_code (void *opt, int mode) { //((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode; //((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode; } __forceinline__ __device__ float3 make_float3(const float4& a) { return make_float3(a.x, a.y, a.z); } // FIXME: // clang++ 9.0 seems to have trouble with tex2d<float4>() look-ups, // so we'll declare this external and implement texture lookups in // CUDA files compiled by nvcc (optix_grid_renderer.cu and // optix_raytrace.cu). // (clang++ 9.0 error 'undefined __nv_tex_surf_handler') extern __device__ float4 osl_tex2DLookup(void *handle, float s, float t); __device__ int osl_texture (void *sg_, const char *name, void *handle, void *opt_, float s, float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans, void *result, void *dresultdx, void *dresultdy, void *alpha, void *dalphadx, void *dalphady, void *ustring_errormessage) { if (!handle) return 0; cudaTextureObject_t texID = cudaTextureObject_t(handle); float4 fromTexture = osl_tex2DLookup(handle, s, t); // see note above // float4 fromTexture = tex2D<float4>(texID, s, t); *((float3*)result) = make_float3(fromTexture.x, fromTexture.y, fromTexture.z); return 1; } __device__ int osl_range_check_err (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { return indexvalue < 0 ? 
0 : length-1; } return indexvalue; } __device__ int osl_range_check (int indexvalue, int length, const char *symname, void *sg, const void *sourcefile, int sourceline, const char *groupname, int layer, const char *layername, const char *shadername) { if (indexvalue < 0 || indexvalue >= length) { indexvalue = osl_range_check_err (indexvalue, length, symname, sg, sourcefile, sourceline, groupname, layer, layername, shadername); } return indexvalue; } #define MAT(m) (*(OSL::Matrix44 *)m) __device__ int osl_get_matrix (void *sg_, void *r, const char *from) { ShaderGlobals *sg = (ShaderGlobals *)sg_; //ShadingContext *ctx = (ShadingContext *)sg->context; if (HDSTR(from) == STRING_PARAMS(common) || //HDSTR(from) == ctx->shadingsys().commonspace_synonym() || HDSTR(from) == STRING_PARAMS(shader)) { MAT(r).makeIdentity (); return true; } if (HDSTR(from) == STRING_PARAMS(object)) { // TODO: Implement transform return false; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } __device__ int osl_get_inverse_matrix (void *sg_, void *r, const char *to) { ShaderGlobals *sg = (ShaderGlobals *)sg_; if (HDSTR(to) == STRING_PARAMS(common) || //HDSTR(to) == ctx->shadingsys().commonspace_synonym() || HDSTR(to) == STRING_PARAMS(shader)) { MAT(r).makeIdentity (); return true; } if (HDSTR(to) == STRING_PARAMS(object)) { // TODO: Implement transform return false; } int ok = false; // TODO: Implement transform if (!ok) { MAT(r).makeIdentity(); // TBR: OSL would throw an error here, what should we do? } return ok; } #undef MAT } #endif //#if (OPTIX_VERSION < 70000)
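The rend_lib closure helpers above implement a simple bump allocator over sg->renderstate: each allot writes a node, then advances the cursor by the node size rounded up to alignof(OSL::ClosureComponent). A minimal standalone sketch of just that round-up arithmetic, with hypothetical names and assuming a power-of-two alignment (which alignof guarantees), could look like this; it is illustrative only and not part of the corpus row above.

#include <cstdio>
#include <cstdint>

__host__ __device__ static inline size_t
align_up_offset(const void* ptr, size_t alignment)
{
    // How many bytes to skip so that ptr + offset is 'alignment'-aligned.
    // Assumes 'alignment' is a power of two.
    uintptr_t bits = reinterpret_cast<uintptr_t>(ptr);
    return ((bits + alignment - 1) & ~(alignment - 1)) - bits;
}

__global__ void align_demo(char* pool)
{
    // Pretend the allocation cursor sits at an odd offset inside the pool.
    char*  cursor = pool + 13;
    size_t skip   = align_up_offset(cursor, 16);
    printf("cursor %% 16 = %llu, skip = %llu, aligned %% 16 = %llu\n",
           (unsigned long long)(reinterpret_cast<uintptr_t>(cursor) % 16),
           (unsigned long long)skip,
           (unsigned long long)(reinterpret_cast<uintptr_t>(cursor + skip) % 16));
}

int main()
{
    char* pool = nullptr;
    cudaMalloc(&pool, 256);      // cudaMalloc returns suitably aligned memory
    align_demo<<<1, 1>>>(pool);
    cudaDeviceSynchronize();
    cudaFree(pool);
    return 0;
}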
2baa81046fef0a1de17db528663e013f3ef47fa0.hip
// !!! This is a file automatically generated by hipify!!! /**** * Copyright (c) 2014, NVIDIA Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. ****/ #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "nvshmem.h" #include "nvshmem_device.h" #include "mpi.h" #define CUDA_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,hipGetErrorString(result));\ exit(-1); \ } \ assert(hipSuccess == result); \ } while (0) __global__ void ping_pong (int *wait_flag_d, int pe, int iter, int skip) { long long int start, stop, time; int i, peer; peer = !pe; for (i=0; i<(iter+skip); i++) { if (i == skip) start = clock(); //printf("pe: %d peer: %d cond: %d \n", pe, peer, i&1); if ((i&1) == pe) { nvshmem_int_wait_until (wait_flag_d, 0, i+1); nvshmem_int_p (wait_flag_d, i+1, peer); nvshmem_quiet (); } else { nvshmem_int_p (wait_flag_d, i+1, peer); nvshmem_quiet (); nvshmem_int_wait_until (wait_flag_d, 0, i+1); } } stop = clock(); if (pe == 0) { time = (stop - start)/iter; printf("sync latency: %lld cycles\n", time); } } int main (int c, char *v[]) { int local_rank = 0; int dev_count, mype, npes; int *wait_flag_d; int iter = 200; int skip = 20; CUDA_CHECK(hipGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } CUDA_CHECK(hipSetDevice(local_rank%dev_count)); MPI_Init (&c, &v); nvstart_pes(); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); if (npes != 2) { fprintf(stderr, "This test requires exactly two processes \n"); goto finalize; } wait_flag_d = (int *) nvshmalloc (sizeof(int)); hipMemset(wait_flag_d, 0, sizeof(int)); CUDA_CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( ping_pong) , dim3(1), dim3(1), 0, 0, wait_flag_d, mype, iter, skip); CUDA_CHECK(hipDeviceSynchronize()); nvshmem_barrier_all(); finalize: nvshmcleanup(); nvstop_pes(); MPI_Finalize(); return 0; }
2baa81046fef0a1de17db528663e013f3ef47fa0.cu
/**** * Copyright (c) 2014, NVIDIA Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. ****/ #include <stdio.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> #include "nvshmem.h" #include "nvshmem_device.h" #include "mpi.h" #define CUDA_CHECK(stmt) \ do { \ cudaError_t result = (stmt); \ if (cudaSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,cudaGetErrorString(result));\ exit(-1); \ } \ assert(cudaSuccess == result); \ } while (0) __global__ void ping_pong (int *wait_flag_d, int pe, int iter, int skip) { long long int start, stop, time; int i, peer; peer = !pe; for (i=0; i<(iter+skip); i++) { if (i == skip) start = clock(); //printf("pe: %d peer: %d cond: %d \n", pe, peer, i&1); if ((i&1) == pe) { nvshmem_int_wait_until (wait_flag_d, 0, i+1); nvshmem_int_p (wait_flag_d, i+1, peer); nvshmem_quiet (); } else { nvshmem_int_p (wait_flag_d, i+1, peer); nvshmem_quiet (); nvshmem_int_wait_until (wait_flag_d, 0, i+1); } } stop = clock(); if (pe == 0) { time = (stop - start)/iter; printf("sync latency: %lld cycles\n", time); } } int main (int c, char *v[]) { int local_rank = 0; int dev_count, mype, npes; int *wait_flag_d; int iter = 200; int skip = 20; CUDA_CHECK(cudaGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } CUDA_CHECK(cudaSetDevice(local_rank%dev_count)); MPI_Init (&c, &v); nvstart_pes(); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); if (npes != 2) { fprintf(stderr, "This test requires exactly two processes \n"); goto finalize; } wait_flag_d = (int *) nvshmalloc (sizeof(int)); cudaMemset(wait_flag_d, 0, sizeof(int)); CUDA_CHECK(cudaDeviceSynchronize()); ping_pong <<<1, 1>>> (wait_flag_d, mype, iter, skip); CUDA_CHECK(cudaDeviceSynchronize()); nvshmem_barrier_all(); finalize: nvshmcleanup(); nvstop_pes(); MPI_Finalize(); return 0; }
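The ping_pong kernel in the pair above measures round-trip latency by skipping warm-up iterations, sampling the device clock, and dividing the elapsed cycles by the iteration count. A hedged, NVSHMEM-free sketch of just that timing skeleton follows; a toy single-GPU operation stands in for the remote put/wait, the names are hypothetical, and clock64() replaces clock() to avoid counter wrap-around.

#include <cstdio>

__global__ void time_op(int* flag, int iter, int skip)
{
    long long int start = 0, stop = 0;
    for (int i = 0; i < iter + skip; ++i) {
        if (i == skip)          // exclude warm-up iterations from the timing
            start = clock64();
        atomicAdd(flag, 1);     // stand-in for the timed operation
        __threadfence();
    }
    stop = clock64();
    printf("avg cycles per iteration: %lld\n", (stop - start) / iter);
}

int main()
{
    int* flag_d = nullptr;
    cudaMalloc(&flag_d, sizeof(int));
    cudaMemset(flag_d, 0, sizeof(int));
    time_op<<<1, 1>>>(flag_d, /*iter=*/200, /*skip=*/20);
    cudaDeviceSynchronize();
    cudaFree(flag_d);
    return 0;
}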
3d86ca75aa195573d0bdd00c3617fb49a72512eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorSort.cu" #else // In alignment with default sort on a c++ map, this function // will permute key and value tensors identically, and // in such a way that the 'key' tensor is ordered numerically THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, THCTensor* key, THCudaLongTensor* value, int dim, bool dir) { THLongStorage *valueSize = THCudaLongTensor_newSizeOf(state, value); THArgCheck(THCTensor_(isSize)(state, key, valueSize), 2, "Key tensor must have same size as value tensor"); THLongStorage_free(valueSize); int dims = THCudaLongTensor__nDimension(state, value); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCTensor_(_nDimension)(state, key); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); ptrdiff_t inElements = THCTensor_(nElement)(state, key); int64_t keySliceSize = THCTensor_(size)(state, key, dim); ptrdiff_t keySlices = inElements / keySliceSize; if (THCTensor_(_nDimension)(state, key) == 0) { // Zero-dim tensor; do nothing return; } // The amount of shared memory and block size is based on // 2^ceil(lg(n)); we choose that sorting implementation for a given // size. int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize); // FIXME: We'd have to find some other trick with Thrust to perform a // vectorized (key, value) sort by slice segment if (ceilPowerOf2 > 2048) { THError("sortKeyValueInplace only works for sizes <= 2048 at present"); } // The grid is based on the number of independent slices that we // have to sort; one block per slice dim3 grid; if (!THC_getGridFromTiles(keySlices, grid)) { THError("Slice to sort is too large"); } #define HANDLE_CASE(TYPE, A, SIZE) \ do { \ int blockSize = SIZE / 2; \ if (blockSize < 1) { \ blockSize = 1; \ } \ \ dim3 block(blockSize); \ \ if (dir) { \ hipLaunchKernelGGL(( bitonicSortKVInPlace<real, int64_t, A, -1, GTComp<real>, TYPE, SIZE>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ GTComp<real>()); \ } else { \ hipLaunchKernelGGL(( bitonicSortKVInPlace<real, int64_t, A, -1, LTComp<real>, TYPE, SIZE>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ LTComp<real>()); \ } \ } while (0) #define HANDLE_SORT_CASE(TYPE, A) \ { \ switch (ceilPowerOf2) { \ case 2048: \ HANDLE_CASE(TYPE, A, 2048); \ break; \ case 1024: \ case 512: \ case 256: \ HANDLE_CASE(TYPE, A, 1024); \ break; \ case 128: \ case 64: \ HANDLE_CASE(TYPE, A, 128); \ break; \ case 32: \ case 16: \ case 8: \ case 4: \ case 2: \ HANDLE_CASE(TYPE, A, 32); \ break; \ case 1: \ /* Nothing to do, data already sorted */ \ break; \ default: \ assert(false); \ } \ } // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis if (THCTensor_canUse32BitIndexMath(state, key)) { TensorInfo<real, unsigned int> keyInfo = getTensorInfo<real, THCTensor, unsigned int>(state, key); keyInfo.reduceDim(dim); int collapseKeyDim = keyInfo.collapseDims(dim); TensorInfo<int64_t, unsigned int> valueInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, value); valueInfo.reduceDim(dim); int collapseValueDim = 
valueInfo.collapseDims(dim); if (keyInfo.isContiguous()) { HANDLE_SORT_CASE(unsigned int, -2); } else { switch (keyInfo.dims) { case 2: HANDLE_SORT_CASE(unsigned int, 2); break; default: HANDLE_SORT_CASE(unsigned int, -1); break; } } } else { TensorInfo<real, uint64_t> keyInfo = getTensorInfo<real, THCTensor, uint64_t>(state, key); keyInfo.reduceDim(dim); int collapseKeyDim = keyInfo.collapseDims(dim); TensorInfo<int64_t, uint64_t> valueInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, value); valueInfo.reduceDim(dim); int collapseValueDim = valueInfo.collapseDims(dim); // int64_t case is rare, just instantiate the generic version HANDLE_SORT_CASE(uint64_t, -1); } #undef HANDLE_CASE #undef HANDLE_SORT_CASE #undef HANDLE_A_CASE THCudaCheck(hipGetLastError()); } void THCTensor_(sortViaThrust)(THCState* state, THCTensor* sorted, THCudaLongTensor* indices, THCTensor* input, int dim, bool dir) { int nDims = THCTensor_(_nDimension)(state, input); ptrdiff_t totalElements = THCTensor_(nElement)(state, input); int64_t sliceSize = THCTensor_(size)(state, input, dim); int64_t sliceStride = THCTensor_(stride)(state, input, dim); // We perform a vectorized segmented sort in Thrust. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 3 4 5 // where indices is a global index (across all slices) // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 3 2 5 1 4 0 // Then we stable sort by segment, which is index / 3: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 3 5 4 // Then we translate the global index to a per-slice Lua index // (index % 3) + 1: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 3 2 1 1 3 2 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the Thrust sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. 
THCTensor_(copy)(state, sorted, input); THCTensor* trKeys = THCTensor_(newWithTensor)(state, sorted); THCudaLongTensor* trIndices = THCudaLongTensor_newWithTensor(state, indices); // Transpose dim to innermost if (dim != nDims - 1) { THCTensor_(transpose)(state, trKeys, NULL, dim, nDims - 1); THCudaLongTensor_transpose(state, trIndices, NULL, dim, nDims - 1); } // Thrust must operate on a contiguous layout THCTensor* trContigKey = THCTensor_(newContiguous)(state, trKeys); THCudaLongTensor* trContigIndices = THCudaLongTensor_newContiguous(state, trIndices); THCTensor_(free)(state, trKeys); THCudaLongTensor_free(state, trIndices); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<real> keyIter(THCTensor_(data)(state, trContigKey)); // Since we are composing a global index across all segments rather // than a per-segment index, we treat the memory as int so we don't // have problems sorting slices < 2^24 but where the entire tensor // has more than 2^24 elements thrust::device_ptr<int64_t> indexIter((int64_t*) THCudaLongTensor_data(state, trContigIndices)); // Fill the indices with a global index across all slices thrust::counting_iterator<int64_t> countIter(0); thrust::copy( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif countIter, countIter + totalElements, indexIter); // First, we sort globally (across all slices) according to key // (the values we're sorting) if (dir) { thrust::stable_sort_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif keyIter, keyIter + totalElements, indexIter, ThrustGTOp<real>()); } else { thrust::stable_sort_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif keyIter, keyIter + totalElements, indexIter, ThrustLTOp<real>()); } // Then, re-sort according to slice that each index is // in. 
This completes the segment sort in Thrust, since we're // stably sorting here, preserving the relative order of values // per each slice thrust::stable_sort_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif indexIter, indexIter + totalElements, keyIter, SliceComp(sliceSize)); // Translate the global integer 0-based index to a per-slice real // Lua index thrust::for_each( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif indexIter, indexIter + totalElements, GlobalIndexToPerSliceIndex(sliceSize)); // Reverse the transposition as needed if (dim != nDims - 1) { THCTensor_(transpose)(state, trContigKey, NULL, dim, nDims - 1); THCudaLongTensor_transpose(state, trContigIndices, NULL, dim, nDims - 1); } // Then copy back to the expected output THCTensor_(freeCopyTo)(state, trContigKey, sorted); THCudaLongTensor_freeCopyTo(state, trContigIndices, indices); } THC_API void THCTensor_(sort)(THCState* state, THCTensor *sorted, THCudaLongTensor *indices, THCTensor *input, int dim, int order) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, sorted, input)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int64_t dims = THCTensor_(_nDimension)(state, sorted); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(_nDimension)(state, input); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); dims = THCudaLongTensor__nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); // Make sure sufficient output space is allocated THCTensor_(resizeAs)(state, sorted, input); THLongStorage *inputSize = THCTensor_(newSizeOf)(state, input); THCudaLongTensor_resize(state, indices, inputSize, NULL); THLongStorage_free(inputSize); // How large are the slices that we are sorting? int64_t sliceSize = THCTensor_(size)(state, input, dim); // Workaround: // CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace, // and so for the double word types, // we get "too many resources requested for launch" in the 2048 case #if TORCH_HIP_VERSION >= 8000 #if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG) int maxSliceSize = 1024; #else int maxSliceSize = 2048; #endif #else int maxSliceSize = 2048; #endif if (sliceSize <= maxSliceSize) { // Fill `indices` (the values) with the // slice-relative index. THCudaLongTensor_fillSliceWithIndex(state, indices, dim); // We sort k/v pairs in-place; copy unsorted input to output THCTensor_(copy)(state, sorted, input); // Sort using our in-place k/v kernel that supports arbitrary // layout THCTensor_(sortKeyValueInplace)(state, sorted, indices, dim, order); } else { // Otherwise, fall back upon Thrust, which handles all other cases // (potentially slowly, with extra copies/memory allocations) THCTensor_(sortViaThrust)(state, sorted, indices, input, dim, (bool) order); } THCudaCheck(hipGetLastError()); } #endif
3d86ca75aa195573d0bdd00c3617fb49a72512eb.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorSort.cu" #else // In alignment with default sort on a c++ map, this function // will permute key and value tensors identically, and // in such a way that the 'key' tensor is ordered numerically THC_API void THCTensor_(sortKeyValueInplace)(THCState* state, THCTensor* key, THCudaLongTensor* value, int dim, bool dir) { THLongStorage *valueSize = THCudaLongTensor_newSizeOf(state, value); THArgCheck(THCTensor_(isSize)(state, key, valueSize), 2, "Key tensor must have same size as value tensor"); THLongStorage_free(valueSize); int dims = THCudaLongTensor__nDimension(state, value); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCTensor_(_nDimension)(state, key); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); ptrdiff_t inElements = THCTensor_(nElement)(state, key); int64_t keySliceSize = THCTensor_(size)(state, key, dim); ptrdiff_t keySlices = inElements / keySliceSize; if (THCTensor_(_nDimension)(state, key) == 0) { // Zero-dim tensor; do nothing return; } // The amount of shared memory and block size is based on // 2^ceil(lg(n)); we choose that sorting implementation for a given // size. int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize); // FIXME: We'd have to find some other trick with Thrust to perform a // vectorized (key, value) sort by slice segment if (ceilPowerOf2 > 2048) { THError("sortKeyValueInplace only works for sizes <= 2048 at present"); } // The grid is based on the number of independent slices that we // have to sort; one block per slice dim3 grid; if (!THC_getGridFromTiles(keySlices, grid)) { THError("Slice to sort is too large"); } #define HANDLE_CASE(TYPE, A, SIZE) \ do { \ int blockSize = SIZE / 2; \ if (blockSize < 1) { \ blockSize = 1; \ } \ \ dim3 block(blockSize); \ \ if (dir) { \ bitonicSortKVInPlace<real, int64_t, A, -1, GTComp<real>, TYPE, SIZE> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ GTComp<real>()); \ } else { \ bitonicSortKVInPlace<real, int64_t, A, -1, LTComp<real>, TYPE, SIZE> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ keyInfo, \ keySlices, \ (TYPE) keySliceSize, \ (TYPE) keyInfo.strides[collapseKeyDim], \ valueInfo, \ (TYPE) valueInfo.strides[collapseValueDim], \ LTComp<real>()); \ } \ } while (0) #define HANDLE_SORT_CASE(TYPE, A) \ { \ switch (ceilPowerOf2) { \ case 2048: \ HANDLE_CASE(TYPE, A, 2048); \ break; \ case 1024: \ case 512: \ case 256: \ HANDLE_CASE(TYPE, A, 1024); \ break; \ case 128: \ case 64: \ HANDLE_CASE(TYPE, A, 128); \ break; \ case 32: \ case 16: \ case 8: \ case 4: \ case 2: \ HANDLE_CASE(TYPE, A, 32); \ break; \ case 1: \ /* Nothing to do, data already sorted */ \ break; \ default: \ assert(false); \ } \ } // The constructed key/value tensor info is used to select the slice // we are sorting on a per-block basis if (THCTensor_canUse32BitIndexMath(state, key)) { TensorInfo<real, unsigned int> keyInfo = getTensorInfo<real, THCTensor, unsigned int>(state, key); keyInfo.reduceDim(dim); int collapseKeyDim = keyInfo.collapseDims(dim); TensorInfo<int64_t, unsigned int> valueInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, value); valueInfo.reduceDim(dim); int collapseValueDim = valueInfo.collapseDims(dim); if (keyInfo.isContiguous()) { HANDLE_SORT_CASE(unsigned int, -2); } else { switch (keyInfo.dims) { case 2: 
HANDLE_SORT_CASE(unsigned int, 2); break; default: HANDLE_SORT_CASE(unsigned int, -1); break; } } } else { TensorInfo<real, uint64_t> keyInfo = getTensorInfo<real, THCTensor, uint64_t>(state, key); keyInfo.reduceDim(dim); int collapseKeyDim = keyInfo.collapseDims(dim); TensorInfo<int64_t, uint64_t> valueInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, value); valueInfo.reduceDim(dim); int collapseValueDim = valueInfo.collapseDims(dim); // int64_t case is rare, just instantiate the generic version HANDLE_SORT_CASE(uint64_t, -1); } #undef HANDLE_CASE #undef HANDLE_SORT_CASE #undef HANDLE_A_CASE THCudaCheck(cudaGetLastError()); } void THCTensor_(sortViaThrust)(THCState* state, THCTensor* sorted, THCudaLongTensor* indices, THCTensor* input, int dim, bool dir) { int nDims = THCTensor_(_nDimension)(state, input); ptrdiff_t totalElements = THCTensor_(nElement)(state, input); int64_t sliceSize = THCTensor_(size)(state, input, dim); int64_t sliceStride = THCTensor_(stride)(state, input, dim); // We perform a vectorized segmented sort in Thrust. // Say we are sorting a (2, 3) tensor. We have in flattened form: // values 0.4 1.2 5.3 6.2 1.3 2.3 // indices 0 1 2 3 4 5 // where indices is a global index (across all slices) // First we sort by values, globally: // values 6.2 5.3 2.3 1.2 1.3 0.4 // indices 3 2 5 1 4 0 // Then we stable sort by segment, which is index / 3: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 2 1 0 3 5 4 // Then we translate the global index to a per-slice Lua index // (index % 3) + 1: // values 5.3 1.2 0.4 6.2 2.3 1.3 // indices 3 2 1 1 3 2 // This method can only work if the slice we are sorting (`dim`) is // innermost, and both values and indices are contiguous. We do this // by re-arranging the input into this form as needed, which will // unfortunately allocate memory if the request is not in this form. // Vectorized sort is slower than iterated sort if the number of // slices is small (since we're sorting twice, instead of invoking a // smaller sort `numSlices` times), but the Thrust sort // implementation here is a catch-all, so we're not looking for // efficiency, but instead correctness. 
THCTensor_(copy)(state, sorted, input); THCTensor* trKeys = THCTensor_(newWithTensor)(state, sorted); THCudaLongTensor* trIndices = THCudaLongTensor_newWithTensor(state, indices); // Transpose dim to innermost if (dim != nDims - 1) { THCTensor_(transpose)(state, trKeys, NULL, dim, nDims - 1); THCudaLongTensor_transpose(state, trIndices, NULL, dim, nDims - 1); } // Thrust must operate on a contiguous layout THCTensor* trContigKey = THCTensor_(newContiguous)(state, trKeys); THCudaLongTensor* trContigIndices = THCudaLongTensor_newContiguous(state, trIndices); THCTensor_(free)(state, trKeys); THCudaLongTensor_free(state, trIndices); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<real> keyIter(THCTensor_(data)(state, trContigKey)); // Since we are composing a global index across all segments rather // than a per-segment index, we treat the memory as int so we don't // have problems sorting slices < 2^24 but where the entire tensor // has more than 2^24 elements thrust::device_ptr<int64_t> indexIter((int64_t*) THCudaLongTensor_data(state, trContigIndices)); // Fill the indices with a global index across all slices thrust::counting_iterator<int64_t> countIter(0); thrust::copy( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif countIter, countIter + totalElements, indexIter); // First, we sort globally (across all slices) according to key // (the values we're sorting) if (dir) { thrust::stable_sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif keyIter, keyIter + totalElements, indexIter, ThrustGTOp<real>()); } else { thrust::stable_sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif keyIter, keyIter + totalElements, indexIter, ThrustLTOp<real>()); } // Then, re-sort according to slice that each index is // in. 
This completes the segment sort in Thrust, since we're // stably sorting here, preserving the relative order of values // per each slice thrust::stable_sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif indexIter, indexIter + totalElements, keyIter, SliceComp(sliceSize)); // Translate the global integer 0-based index to a per-slice real // Lua index thrust::for_each( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #endif indexIter, indexIter + totalElements, GlobalIndexToPerSliceIndex(sliceSize)); // Reverse the transposition as needed if (dim != nDims - 1) { THCTensor_(transpose)(state, trContigKey, NULL, dim, nDims - 1); THCudaLongTensor_transpose(state, trContigIndices, NULL, dim, nDims - 1); } // Then copy back to the expected output THCTensor_(freeCopyTo)(state, trContigKey, sorted); THCudaLongTensor_freeCopyTo(state, trContigIndices, indices); } THC_API void THCTensor_(sort)(THCState* state, THCTensor *sorted, THCudaLongTensor *indices, THCTensor *input, int dim, int order) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, sorted, input)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); int64_t dims = THCTensor_(_nDimension)(state, sorted); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(_nDimension)(state, input); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); dims = THCudaLongTensor__nDimension(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); // Make sure sufficient output space is allocated THCTensor_(resizeAs)(state, sorted, input); THLongStorage *inputSize = THCTensor_(newSizeOf)(state, input); THCudaLongTensor_resize(state, indices, inputSize, NULL); THLongStorage_free(inputSize); // How large are the slices that we are sorting? int64_t sliceSize = THCTensor_(size)(state, input, dim); // Workaround: // CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace, // and so for the double word types, // we get "too many resources requested for launch" in the 2048 case #if CUDA_VERSION >= 8000 #if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG) int maxSliceSize = 1024; #else int maxSliceSize = 2048; #endif #else int maxSliceSize = 2048; #endif if (sliceSize <= maxSliceSize) { // Fill `indices` (the values) with the // slice-relative index. THCudaLongTensor_fillSliceWithIndex(state, indices, dim); // We sort k/v pairs in-place; copy unsorted input to output THCTensor_(copy)(state, sorted, input); // Sort using our in-place k/v kernel that supports arbitrary // layout THCTensor_(sortKeyValueInplace)(state, sorted, indices, dim, order); } else { // Otherwise, fall back upon Thrust, which handles all other cases // (potentially slowly, with extra copies/memory allocations) THCTensor_(sortViaThrust)(state, sorted, indices, input, dim, (bool) order); } THCudaCheck(cudaGetLastError()); } #endif
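sortViaThrust in the pair above turns two stable sorts into a segmented sort: first a global sort by value that drags a global index along, then a stable sort by the slice id recovered from that index, which preserves the per-slice value order. A small standalone Thrust sketch of the same trick, using the (2, 3) example from the comments, is given below; the comparators are simplified, the names are hypothetical, and it is illustrative only.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

struct SliceComparator {
    int sliceSize;
    __host__ __device__ bool operator()(int a, int b) const {
        return (a / sliceSize) < (b / sliceSize);   // compare slice ids only
    }
};

int main()
{
    const int sliceSize = 3;
    const float h_vals[] = {0.4f, 1.2f, 5.3f, 6.2f, 1.3f, 2.3f};
    thrust::device_vector<float> vals(h_vals, h_vals + 6);
    thrust::device_vector<int>   idx(6);
    thrust::sequence(idx.begin(), idx.end());        // global indices 0..5

    // Pass 1: sort globally by value (descending), carrying indices along.
    thrust::stable_sort_by_key(vals.begin(), vals.end(), idx.begin(),
                               thrust::greater<float>());
    // Pass 2: stable sort by slice id; per-slice value order is preserved.
    thrust::stable_sort_by_key(idx.begin(), idx.end(), vals.begin(),
                               SliceComparator{sliceSize});

    for (int i = 0; i < 6; ++i)
        printf("%.1f (slice-local index %d)\n",
               (float)vals[i], (int)idx[i] % sliceSize);
    return 0;
}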
52b9bbb83e346ff875daf989e92dbcc47f37f63f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <string> #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/operators/fused/fused_seqpool_cvm_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { template <typename T> using Vector = framework::Vector<T>; #define CUDA_KERNEL_LOOP(i, n) \ for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // normal template <typename T> __global__ void FusedSeqpoolKernelNormal(const size_t N, T **input_values, T **seqpool_output_values, size_t **lods_values, const int batch_size, const int embedding_size, const float pad_value) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; int x = key / batch_size; // slot id int y = key % batch_size; // ins id auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); T val = static_cast<T>(pad_value); for (auto k = start; k < end; ++k) { val += *(input_values[x] + k * embedding_size + offset); } *(seqpool_output_values[x] + y * embedding_size + offset) = val; } } // join need show click input template <typename T> __global__ void FusedCVMKernelWithCVM(const size_t N, T **output_values, T **seqpool_output_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; int x = key / batch_size; // slot id int y = key % batch_size; // ins id if (offset == 0) { // show *(output_values[x] + y * embedding_size) = log(*(seqpool_output_values[x] + y * embedding_size) + 1); } else if (offset == 1) { // click *(output_values[x] + y * embedding_size + offset) = log(*(seqpool_output_values[x] + y * embedding_size + 1) + 1) - log(*(seqpool_output_values[x] + y * embedding_size) + 1); } else { *(output_values[x] + y * embedding_size + offset) = *(seqpool_output_values[x] + y * embedding_size + offset); } } } // update not need show click input template <typename T> __global__ void FusedCVMKernelNoCVM(const size_t N, T **output_values, T **seqpool_output_values, const int batch_size, const int no_cvm_embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / no_cvm_embedding_size; int offset = i % no_cvm_embedding_size; int x = key / batch_size; // slot id int y = key % batch_size; // ins id // no cvm *(output_values[x] + y * no_cvm_embedding_size + offset) = *(seqpool_output_values[x] + y * (no_cvm_embedding_size + cvm_offset) + offset + cvm_offset); } } template <typename T> void FusedSeqpoolCVM(const framework::ExecutionContext &ctx, // const paddle::platform::Place &place, const std::vector<const T *> &input_data, const std::vector<T *> &output_data, const std::vector<T *> &seqpool_output_data, std::vector<const size_t *> 
lods, const int batch_size, const int slot_num, const int embedding_size, const float padding_value, const bool use_cvm, const int cvm_offset) { auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); size_t total_ptr_len = input_data.size() + output_data.size() + seqpool_output_data.size() + lods.size(); auto temp_ptr = memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *)); void *ptr = temp_ptr->ptr(); #ifdef PADDLE_WITH_HIP T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_input_values, input_data.data(), input_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_output_values = reinterpret_cast<T **>(&gpu_input_values[input_data.size()]); platform::GpuMemcpyAsync(gpu_output_values, output_data.data(), output_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_seqpool_output_values = reinterpret_cast<T **>(&gpu_output_values[output_data.size()]); platform::GpuMemcpyAsync( gpu_seqpool_output_values, seqpool_output_data.data(), seqpool_output_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>( &gpu_seqpool_output_values[seqpool_output_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), hipMemcpyHostToDevice, stream); #else T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_input_values, input_data.data(), input_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_output_values = reinterpret_cast<T **>(&gpu_input_values[input_data.size()]); platform::GpuMemcpyAsync(gpu_output_values, output_data.data(), output_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_seqpool_output_values = reinterpret_cast<T **>(&gpu_output_values[output_data.size()]); platform::GpuMemcpyAsync( gpu_seqpool_output_values, seqpool_output_data.data(), seqpool_output_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>( &gpu_seqpool_output_values[seqpool_output_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), hipMemcpyHostToDevice, stream); #endif size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size); platform::GpuLaunchConfig config = GetGpuLaunchConfig1D(dev_ctx, N); // first sum pool hipLaunchKernelGGL(( FusedSeqpoolKernelNormal), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream, N, gpu_input_values, gpu_seqpool_output_values, lods_values, batch_size, embedding_size, padding_value); // second log if (use_cvm) { hipLaunchKernelGGL(( FusedCVMKernelWithCVM), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream, N, gpu_output_values, gpu_seqpool_output_values, batch_size, embedding_size, cvm_offset); } else { // not need show click input N = static_cast<size_t>(batch_size * slot_num * (embedding_size - cvm_offset)); platform::GpuLaunchConfig config = GetGpuLaunchConfig1D(dev_ctx, N); hipLaunchKernelGGL(( FusedCVMKernelNoCVM), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream, N, gpu_output_values, gpu_seqpool_output_values, batch_size, (embedding_size - cvm_offset), cvm_offset); } } // join grad template <typename T> __global__ void FusedSeqpoolCVMGradKernelWithCVM( const size_t N, T **out_grads_values, T **in_grads_values, T **cvm_values, size_t **lods_values, const 
int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; // embedx offset int x = key / batch_size; // slot id int y = key % batch_size; // ins id T &val = (offset < cvm_offset) ? *(cvm_values[x] + y * cvm_offset + offset) : *(out_grads_values[x] + y * embedding_size + offset); auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); for (auto k = start; k < end; ++k) { *(in_grads_values[x] + k * embedding_size + offset) = val; } } } // join only show not has click template <typename T> __global__ void FusedSeqpoolCVMGradKernelWithShow( const size_t N, T **out_grads_values, T **in_grads_values, T **cvm_values, size_t **lods_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; // embedx offset int x = key / batch_size; // slot id int y = key % batch_size; // ins id T &val = (offset < cvm_offset) ? *(cvm_values[x] + y * cvm_offset + offset) : *(out_grads_values[x] + y * (embedding_size - 1) + offset - 1); auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); for (auto k = start; k < end; ++k) { *(in_grads_values[x] + k * embedding_size + offset) = val; } } } // update grad template <typename T> __global__ void FusedSeqpoolCVMGradKernelNoCVM( const size_t N, T **out_grads_values, T **in_grads_values, T **cvm_values, size_t **lods_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; // embedx offset int x = key / batch_size; // slot id int y = key % batch_size; // ins id T &val = (offset < cvm_offset) ? 
*(cvm_values[x] + y * cvm_offset + offset) : *(out_grads_values[x] + y * (embedding_size - cvm_offset) + offset - cvm_offset); auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); for (auto k = start; k < end; ++k) { *(in_grads_values[x] + k * embedding_size + offset) = val; } } } template <typename T> void FusedSeqpoolCVMGrad(const framework::ExecutionContext &ctx, const std::vector<const T *> &out_grads_data, const std::vector<T *> &in_grads_data, const std::vector<const T *> &cvm_data, const std::vector<const size_t *> &lods, const int batch_size, const int slot_num, const int embedding_size, const bool use_cvm, const int cvm_offset) { auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); size_t total_ptr_len = out_grads_data.size() + in_grads_data.size() + cvm_data.size() + lods.size(); auto temp_ptr = memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *)); #ifdef PADDLE_WITH_HIP T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_out_grads_values, out_grads_data.data(), out_grads_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_in_grads_values = reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]); platform::GpuMemcpyAsync(gpu_in_grads_values, in_grads_data.data(), in_grads_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_cvm_values = reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]); platform::GpuMemcpyAsync(gpu_cvm_values, cvm_data.data(), cvm_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), hipMemcpyHostToDevice, stream); #else T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_out_grads_values, out_grads_data.data(), out_grads_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_in_grads_values = reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]); platform::GpuMemcpyAsync(gpu_in_grads_values, in_grads_data.data(), in_grads_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_cvm_values = reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]); platform::GpuMemcpyAsync(gpu_cvm_values, cvm_data.data(), cvm_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), hipMemcpyHostToDevice, stream); #endif size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size); auto config = GetGpuLaunchConfig1D(dev_ctx, N); if (use_cvm) { // join grad hipLaunchKernelGGL(( FusedSeqpoolCVMGradKernelWithCVM), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream, N, gpu_out_grads_values, gpu_in_grads_values, gpu_cvm_values, lods_values, batch_size, embedding_size, cvm_offset); } else { // update grad hipLaunchKernelGGL(( FusedSeqpoolCVMGradKernelNoCVM), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream, N, gpu_out_grads_values, gpu_in_grads_values, gpu_cvm_values, lods_values, batch_size, embedding_size, cvm_offset); } } template <typename T> class FusedSeqpoolCVMCUDAKernel : public framework::OpKernel<T> { public: void Compute(const 
framework::ExecutionContext &ctx) const override { auto inputs = ctx.MultiInput<LoDTensor>("X"); auto outputs = ctx.MultiOutput<framework::Tensor>("Out"); const auto slot_size = inputs.size(); std::vector<const float *> input_data(slot_size); std::vector<const size_t *> lods_data(slot_size); std::vector<T *> output_data(slot_size); std::vector<LoDTensor> seqpool_outputs(slot_size); std::vector<T *> seqpool_output_data(slot_size); auto padding_value = ctx.Attr<float>("pad_value"); auto use_cvm = ctx.Attr<bool>("use_cvm"); const int cvm_offset = ctx.Attr<int>("cvm_offset"); int embedding_size = inputs[0]->numel() / inputs[0]->dims()[0]; int batch_size = -1; std::vector<paddle::framework::MixVector<size_t> *> mix_lods_v(slot_size); for (size_t i = 0; i < slot_size; ++i) { const auto *input = inputs[i]; Vector<size_t> lods; if (input->lod().size() != 0) { auto lod = input->lod(); lods = lod[0]; } else { lods.push_back(0); for (int i = 0; i < input->dims()[0]; i++) { lods.push_back(i + 1); } } int cur_batch_size = input->lod().size() ? input->lod()[0].size() - 1 : input->dims()[0]; if (batch_size == -1) { batch_size = cur_batch_size; } else { PADDLE_ENFORCE_EQ(batch_size, cur_batch_size, platform::errors::PreconditionNotMet( "The batch size of all input should be same, " "please cheack, last batchsize is %d, current " "batchsize is %d", batch_size, cur_batch_size)); } input_data[i] = reinterpret_cast<const T *>(input->data<T>()); auto *output = outputs[i]; if (use_cvm) { output->Resize({batch_size, embedding_size}); } else { output->Resize({batch_size, embedding_size - cvm_offset}); } output_data[i] = reinterpret_cast<T *>(output->mutable_data<T>(ctx.GetPlace())); mix_lods_v[i] = new paddle::framework::MixVector<size_t>(&lods); lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace()); seqpool_output_data[i] = reinterpret_cast<T *>(seqpool_outputs[i].mutable_data<T>( {batch_size, embedding_size}, ctx.GetPlace())); } FusedSeqpoolCVM(ctx, input_data, output_data, seqpool_output_data, lods_data, batch_size, slot_size, embedding_size, padding_value, use_cvm, cvm_offset); for (int i = 0; i < slot_size; i++) { delete mix_lods_v[i]; } } }; template <typename T> class FusedSeqpoolCVMGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto out_grads = ctx.MultiInput<LoDTensor>(framework::GradVarName("Out")); auto in_grads = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); auto *cvm = ctx.Input<LoDTensor>("CVM"); std::string pooltype = ctx.Attr<std::string>("pooltype"); auto use_cvm = ctx.Attr<bool>("use_cvm"); const int cvm_offset = ctx.Attr<int>("cvm_offset"); const auto slot_size = in_grads.size(); std::vector<const T *> out_grads_data(slot_size); std::vector<T *> in_grads_data(slot_size); std::vector<const T *> cvm_data(slot_size); std::vector<const size_t *> lods_data(slot_size); int embedding_size = in_grads[0]->numel() / in_grads[0]->dims()[0]; int batch_size = -1; std::vector<paddle::framework::MixVector<size_t> *> mix_lods_v(slot_size); for (size_t i = 0; i < slot_size; ++i) { auto *in_grad = in_grads[i]; Vector<size_t> lods; if (in_grad->lod().size() != 0) { auto lod = in_grad->lod(); lods = lod[0]; } else { lods.push_back(0); for (int i = 0; i < in_grad->dims()[0]; i++) { lods.push_back(i + 1); } } int cur_batch_size = in_grad->lod().size() ? 
in_grad->lod()[0].size() - 1 : in_grad->dims()[0]; if (batch_size == -1) { batch_size = cur_batch_size; } else { PADDLE_ENFORCE_EQ(batch_size, cur_batch_size, platform::errors::PreconditionNotMet( "The batch size of all input should be same, " "please cheack, last batchsize is %d, current " "batchsize is %d", batch_size, cur_batch_size)); } auto *out_grad = out_grads[i]; out_grads_data[i] = reinterpret_cast<const T *>(out_grad->data<T>()); in_grads_data[i] = reinterpret_cast<T *>(in_grad->mutable_data<T>(ctx.GetPlace())); mix_lods_v[i] = new paddle::framework::MixVector<size_t>(&lods); lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace()); cvm_data[i] = reinterpret_cast<const T *>(cvm->data<T>()); } FusedSeqpoolCVMGrad(ctx, out_grads_data, in_grads_data, cvm_data, lods_data, batch_size, slot_size, embedding_size, use_cvm, cvm_offset); for (int i = 0; i < slot_size; i++) { delete mix_lods_v[i]; } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_seqpool_cvm, ops::FusedSeqpoolCVMCUDAKernel<float>); REGISTER_OP_CUDA_KERNEL(fused_seqpool_cvm_grad, ops::FusedSeqpoolCVMGradCUDAKernel<float>);
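The seqpool kernels in the file above flatten (slot, instance, dimension) into a single index and walk it with the grid-stride CUDA_KERNEL_LOOP macro. A standalone toy sketch of that indexing pattern follows, with a dense input and a fixed number of rows per instance standing in for the LoD offsets; names are hypothetical and the code is illustrative only, not Paddle code.

#include <cstdio>
#include <vector>

#define KERNEL_LOOP(i, n)                                            \
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);    \
       i += (size_t)blockDim.x * gridDim.x)

__global__ void sum_pool(const float* in, float* out,
                         int slots, int batch, int emb, int rows)
{
    const size_t N = (size_t)slots * batch * emb;
    KERNEL_LOOP(i, N) {
        const size_t key    = i / emb;      // which (slot, instance) pair
        const int    offset = i % emb;      // feature dimension
        const int    x      = key / batch;  // slot id
        const int    y      = key % batch;  // instance id
        const float* base   = in + ((size_t)x * batch + y) * rows * emb;
        float val = 0.f;
        for (int k = 0; k < rows; ++k)      // sum-pool over this instance's rows
            val += base[(size_t)k * emb + offset];
        out[key * emb + offset] = val;
    }
}

int main()
{
    const int slots = 2, batch = 3, emb = 4, rows = 5;
    const size_t in_n  = (size_t)slots * batch * rows * emb;
    const size_t out_n = (size_t)slots * batch * emb;
    std::vector<float> h_in(in_n, 1.0f), h_out(out_n, 0.0f);

    float *in_d = nullptr, *out_d = nullptr;
    cudaMalloc(&in_d,  in_n  * sizeof(float));
    cudaMalloc(&out_d, out_n * sizeof(float));
    cudaMemcpy(in_d, h_in.data(), in_n * sizeof(float), cudaMemcpyHostToDevice);

    sum_pool<<<64, 128>>>(in_d, out_d, slots, batch, emb, rows);
    cudaMemcpy(h_out.data(), out_d, out_n * sizeof(float), cudaMemcpyDeviceToHost);

    printf("out[0] = %.1f (expected %d.0)\n", h_out[0], rows);  // every entry equals rows
    cudaFree(in_d); cudaFree(out_d);
    return 0;
}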
52b9bbb83e346ff875daf989e92dbcc47f37f63f.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <string> #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/operators/fused/fused_seqpool_cvm_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" namespace paddle { namespace operators { template <typename T> using Vector = framework::Vector<T>; #define CUDA_KERNEL_LOOP(i, n) \ for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // normal template <typename T> __global__ void FusedSeqpoolKernelNormal(const size_t N, T **input_values, T **seqpool_output_values, size_t **lods_values, const int batch_size, const int embedding_size, const float pad_value) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; int x = key / batch_size; // slot id int y = key % batch_size; // ins id auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); T val = static_cast<T>(pad_value); for (auto k = start; k < end; ++k) { val += *(input_values[x] + k * embedding_size + offset); } *(seqpool_output_values[x] + y * embedding_size + offset) = val; } } // join need show click input template <typename T> __global__ void FusedCVMKernelWithCVM(const size_t N, T **output_values, T **seqpool_output_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; int x = key / batch_size; // slot id int y = key % batch_size; // ins id if (offset == 0) { // show *(output_values[x] + y * embedding_size) = log(*(seqpool_output_values[x] + y * embedding_size) + 1); } else if (offset == 1) { // click *(output_values[x] + y * embedding_size + offset) = log(*(seqpool_output_values[x] + y * embedding_size + 1) + 1) - log(*(seqpool_output_values[x] + y * embedding_size) + 1); } else { *(output_values[x] + y * embedding_size + offset) = *(seqpool_output_values[x] + y * embedding_size + offset); } } } // update not need show click input template <typename T> __global__ void FusedCVMKernelNoCVM(const size_t N, T **output_values, T **seqpool_output_values, const int batch_size, const int no_cvm_embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / no_cvm_embedding_size; int offset = i % no_cvm_embedding_size; int x = key / batch_size; // slot id int y = key % batch_size; // ins id // no cvm *(output_values[x] + y * no_cvm_embedding_size + offset) = *(seqpool_output_values[x] + y * (no_cvm_embedding_size + cvm_offset) + offset + cvm_offset); } } template <typename T> void FusedSeqpoolCVM(const framework::ExecutionContext &ctx, // const paddle::platform::Place &place, const std::vector<const T *> &input_data, const std::vector<T *> &output_data, const std::vector<T *> &seqpool_output_data, std::vector<const size_t *> lods, const int batch_size, const int slot_num, const int embedding_size, const float 
padding_value, const bool use_cvm, const int cvm_offset) { auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); size_t total_ptr_len = input_data.size() + output_data.size() + seqpool_output_data.size() + lods.size(); auto temp_ptr = memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *)); void *ptr = temp_ptr->ptr(); #ifdef PADDLE_WITH_HIP T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_input_values, input_data.data(), input_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_output_values = reinterpret_cast<T **>(&gpu_input_values[input_data.size()]); platform::GpuMemcpyAsync(gpu_output_values, output_data.data(), output_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_seqpool_output_values = reinterpret_cast<T **>(&gpu_output_values[output_data.size()]); platform::GpuMemcpyAsync( gpu_seqpool_output_values, seqpool_output_data.data(), seqpool_output_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>( &gpu_seqpool_output_values[seqpool_output_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), hipMemcpyHostToDevice, stream); #else T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_input_values, input_data.data(), input_data.size() * sizeof(T *), cudaMemcpyHostToDevice, stream); T **gpu_output_values = reinterpret_cast<T **>(&gpu_input_values[input_data.size()]); platform::GpuMemcpyAsync(gpu_output_values, output_data.data(), output_data.size() * sizeof(T *), cudaMemcpyHostToDevice, stream); T **gpu_seqpool_output_values = reinterpret_cast<T **>(&gpu_output_values[output_data.size()]); platform::GpuMemcpyAsync( gpu_seqpool_output_values, seqpool_output_data.data(), seqpool_output_data.size() * sizeof(T *), cudaMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>( &gpu_seqpool_output_values[seqpool_output_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), cudaMemcpyHostToDevice, stream); #endif size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size); platform::GpuLaunchConfig config = GetGpuLaunchConfig1D(dev_ctx, N); // first sum pool FusedSeqpoolKernelNormal<<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>( N, gpu_input_values, gpu_seqpool_output_values, lods_values, batch_size, embedding_size, padding_value); // second log if (use_cvm) { FusedCVMKernelWithCVM<<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(N, gpu_output_values, gpu_seqpool_output_values, batch_size, embedding_size, cvm_offset); } else { // not need show click input N = static_cast<size_t>(batch_size * slot_num * (embedding_size - cvm_offset)); platform::GpuLaunchConfig config = GetGpuLaunchConfig1D(dev_ctx, N); FusedCVMKernelNoCVM<<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(N, gpu_output_values, gpu_seqpool_output_values, batch_size, (embedding_size - cvm_offset), cvm_offset); } } // join grad template <typename T> __global__ void FusedSeqpoolCVMGradKernelWithCVM( const size_t N, T **out_grads_values, T **in_grads_values, T **cvm_values, size_t **lods_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; // embedx offset int x 
= key / batch_size; // slot id int y = key % batch_size; // ins id T &val = (offset < cvm_offset) ? *(cvm_values[x] + y * cvm_offset + offset) : *(out_grads_values[x] + y * embedding_size + offset); auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); for (auto k = start; k < end; ++k) { *(in_grads_values[x] + k * embedding_size + offset) = val; } } } // join only show not has click template <typename T> __global__ void FusedSeqpoolCVMGradKernelWithShow( const size_t N, T **out_grads_values, T **in_grads_values, T **cvm_values, size_t **lods_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; // embedx offset int x = key / batch_size; // slot id int y = key % batch_size; // ins id T &val = (offset < cvm_offset) ? *(cvm_values[x] + y * cvm_offset + offset) : *(out_grads_values[x] + y * (embedding_size - 1) + offset - 1); auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); for (auto k = start; k < end; ++k) { *(in_grads_values[x] + k * embedding_size + offset) = val; } } } // update grad template <typename T> __global__ void FusedSeqpoolCVMGradKernelNoCVM( const size_t N, T **out_grads_values, T **in_grads_values, T **cvm_values, size_t **lods_values, const int batch_size, const int embedding_size, const int cvm_offset) { CUDA_KERNEL_LOOP(i, N) { int key = i / embedding_size; int offset = i % embedding_size; // embedx offset int x = key / batch_size; // slot id int y = key % batch_size; // ins id T &val = (offset < cvm_offset) ? *(cvm_values[x] + y * cvm_offset + offset) : *(out_grads_values[x] + y * (embedding_size - cvm_offset) + offset - cvm_offset); auto &start = *(lods_values[x] + y); auto &end = *(lods_values[x] + y + 1); for (auto k = start; k < end; ++k) { *(in_grads_values[x] + k * embedding_size + offset) = val; } } } template <typename T> void FusedSeqpoolCVMGrad(const framework::ExecutionContext &ctx, const std::vector<const T *> &out_grads_data, const std::vector<T *> &in_grads_data, const std::vector<const T *> &cvm_data, const std::vector<const size_t *> &lods, const int batch_size, const int slot_num, const int embedding_size, const bool use_cvm, const int cvm_offset) { auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); size_t total_ptr_len = out_grads_data.size() + in_grads_data.size() + cvm_data.size() + lods.size(); auto temp_ptr = memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *)); #ifdef PADDLE_WITH_HIP T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_out_grads_values, out_grads_data.data(), out_grads_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_in_grads_values = reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]); platform::GpuMemcpyAsync(gpu_in_grads_values, in_grads_data.data(), in_grads_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); T **gpu_cvm_values = reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]); platform::GpuMemcpyAsync(gpu_cvm_values, cvm_data.data(), cvm_data.size() * sizeof(T *), hipMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), hipMemcpyHostToDevice, stream); #else T **gpu_out_grads_values = reinterpret_cast<T 
**>(temp_ptr->ptr()); platform::GpuMemcpyAsync(gpu_out_grads_values, out_grads_data.data(), out_grads_data.size() * sizeof(T *), cudaMemcpyHostToDevice, stream); T **gpu_in_grads_values = reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]); platform::GpuMemcpyAsync(gpu_in_grads_values, in_grads_data.data(), in_grads_data.size() * sizeof(T *), cudaMemcpyHostToDevice, stream); T **gpu_cvm_values = reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]); platform::GpuMemcpyAsync(gpu_cvm_values, cvm_data.data(), cvm_data.size() * sizeof(T *), cudaMemcpyHostToDevice, stream); size_t **lods_values = reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]); platform::GpuMemcpyAsync(lods_values, lods.data(), lods.size() * sizeof(size_t *), cudaMemcpyHostToDevice, stream); #endif size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size); auto config = GetGpuLaunchConfig1D(dev_ctx, N); if (use_cvm) { // join grad FusedSeqpoolCVMGradKernelWithCVM<<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>( N, gpu_out_grads_values, gpu_in_grads_values, gpu_cvm_values, lods_values, batch_size, embedding_size, cvm_offset); } else { // update grad FusedSeqpoolCVMGradKernelNoCVM<<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>( N, gpu_out_grads_values, gpu_in_grads_values, gpu_cvm_values, lods_values, batch_size, embedding_size, cvm_offset); } } template <typename T> class FusedSeqpoolCVMCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto inputs = ctx.MultiInput<LoDTensor>("X"); auto outputs = ctx.MultiOutput<framework::Tensor>("Out"); const auto slot_size = inputs.size(); std::vector<const float *> input_data(slot_size); std::vector<const size_t *> lods_data(slot_size); std::vector<T *> output_data(slot_size); std::vector<LoDTensor> seqpool_outputs(slot_size); std::vector<T *> seqpool_output_data(slot_size); auto padding_value = ctx.Attr<float>("pad_value"); auto use_cvm = ctx.Attr<bool>("use_cvm"); const int cvm_offset = ctx.Attr<int>("cvm_offset"); int embedding_size = inputs[0]->numel() / inputs[0]->dims()[0]; int batch_size = -1; std::vector<paddle::framework::MixVector<size_t> *> mix_lods_v(slot_size); for (size_t i = 0; i < slot_size; ++i) { const auto *input = inputs[i]; Vector<size_t> lods; if (input->lod().size() != 0) { auto lod = input->lod(); lods = lod[0]; } else { lods.push_back(0); for (int i = 0; i < input->dims()[0]; i++) { lods.push_back(i + 1); } } int cur_batch_size = input->lod().size() ? 
input->lod()[0].size() - 1 : input->dims()[0]; if (batch_size == -1) { batch_size = cur_batch_size; } else { PADDLE_ENFORCE_EQ(batch_size, cur_batch_size, platform::errors::PreconditionNotMet( "The batch size of all input should be same, " "please cheack, last batchsize is %d, current " "batchsize is %d", batch_size, cur_batch_size)); } input_data[i] = reinterpret_cast<const T *>(input->data<T>()); auto *output = outputs[i]; if (use_cvm) { output->Resize({batch_size, embedding_size}); } else { output->Resize({batch_size, embedding_size - cvm_offset}); } output_data[i] = reinterpret_cast<T *>(output->mutable_data<T>(ctx.GetPlace())); mix_lods_v[i] = new paddle::framework::MixVector<size_t>(&lods); lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace()); seqpool_output_data[i] = reinterpret_cast<T *>(seqpool_outputs[i].mutable_data<T>( {batch_size, embedding_size}, ctx.GetPlace())); } FusedSeqpoolCVM(ctx, input_data, output_data, seqpool_output_data, lods_data, batch_size, slot_size, embedding_size, padding_value, use_cvm, cvm_offset); for (int i = 0; i < slot_size; i++) { delete mix_lods_v[i]; } } }; template <typename T> class FusedSeqpoolCVMGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto out_grads = ctx.MultiInput<LoDTensor>(framework::GradVarName("Out")); auto in_grads = ctx.MultiOutput<LoDTensor>(framework::GradVarName("X")); auto *cvm = ctx.Input<LoDTensor>("CVM"); std::string pooltype = ctx.Attr<std::string>("pooltype"); auto use_cvm = ctx.Attr<bool>("use_cvm"); const int cvm_offset = ctx.Attr<int>("cvm_offset"); const auto slot_size = in_grads.size(); std::vector<const T *> out_grads_data(slot_size); std::vector<T *> in_grads_data(slot_size); std::vector<const T *> cvm_data(slot_size); std::vector<const size_t *> lods_data(slot_size); int embedding_size = in_grads[0]->numel() / in_grads[0]->dims()[0]; int batch_size = -1; std::vector<paddle::framework::MixVector<size_t> *> mix_lods_v(slot_size); for (size_t i = 0; i < slot_size; ++i) { auto *in_grad = in_grads[i]; Vector<size_t> lods; if (in_grad->lod().size() != 0) { auto lod = in_grad->lod(); lods = lod[0]; } else { lods.push_back(0); for (int i = 0; i < in_grad->dims()[0]; i++) { lods.push_back(i + 1); } } int cur_batch_size = in_grad->lod().size() ? in_grad->lod()[0].size() - 1 : in_grad->dims()[0]; if (batch_size == -1) { batch_size = cur_batch_size; } else { PADDLE_ENFORCE_EQ(batch_size, cur_batch_size, platform::errors::PreconditionNotMet( "The batch size of all input should be same, " "please cheack, last batchsize is %d, current " "batchsize is %d", batch_size, cur_batch_size)); } auto *out_grad = out_grads[i]; out_grads_data[i] = reinterpret_cast<const T *>(out_grad->data<T>()); in_grads_data[i] = reinterpret_cast<T *>(in_grad->mutable_data<T>(ctx.GetPlace())); mix_lods_v[i] = new paddle::framework::MixVector<size_t>(&lods); lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace()); cvm_data[i] = reinterpret_cast<const T *>(cvm->data<T>()); } FusedSeqpoolCVMGrad(ctx, out_grads_data, in_grads_data, cvm_data, lods_data, batch_size, slot_size, embedding_size, use_cvm, cvm_offset); for (int i = 0; i < slot_size; i++) { delete mix_lods_v[i]; } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_seqpool_cvm, ops::FusedSeqpoolCVMCUDAKernel<float>); REGISTER_OP_CUDA_KERNEL(fused_seqpool_cvm_grad, ops::FusedSeqpoolCVMGradCUDAKernel<float>);
c0378e5d185004d960c1c9d3ea3dedc4b87672d1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_transform.h"

__device__ float op(float d1, float d2, float *params) {
    return d1 != d2;
}

__device__ float op(float d1, float *params) {
    return d1;
}

extern "C"
__global__ void eq_strided_float(int n, int xOffset, int yOffset, float *dx, float *dy,
                                 int incx, int incy, float *params, float *result, int incz) {
    transform(n, xOffset, yOffset, dx, dy, incx, incy, params, result, incz);
}
c0378e5d185004d960c1c9d3ea3dedc4b87672d1.cu
#include "pairwise_transform.h" __device__ float op(float d1,float d2,float *params) { return d1 != d2; } __device__ float op(float d1,float *params) { return d1; } extern "C" __global__ void eq_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *params,float *result,int incz) { transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result,incz); }
6848c24b5ef36fcac09f1fc42e149df50dc1759c.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _DERIVS_ #define _DERIVS_ #include "globalVars.h" #include "devFunctionProtos.h" #include <hip/hip_runtime.h> __device__ double alpha_n(double vm); __device__ double alpha_m(double vm); __device__ double alpha_h(double vm); __device__ double beta_n(double vm); __device__ double beta_m(double vm); __device__ double beta_h(double vm); __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __device__ double alpha_n(double vm) { double out; if(vm != -34) { out = 0.1 * (vm + 34) / (1 - exp(-0.1 * (vm + 34))); } else { out = 0.1; } return out; } __device__ double beta_n(double vm) { double out; out = 1.25 * exp(- (vm + 44) / 80); return out; } __device__ double alpha_m(double vm) { double out; if(vm != -30) { out = 0.1 * (vm + 30) / (1 - exp(-0.1 * (vm + 30))); } else { out = 1; } return out; } __device__ double beta_m(double vm) { double out; out = 4 * exp(-(vm + 55) / 18); return out; } __device__ double alpha_h(double vm) { double out; out = 0.7 * exp(- (vm + 44) / 20); return out; } __device__ double beta_h(double vm) { double out; out = 10 / (exp(-0.1 * (vm + 14)) + 1); return out; } __device__ double m_inf(double vm) { double out, temp; temp = alpha_m(vm); out = temp / (temp + beta_m(vm)); return out; } //z is the gating varible of the adaptation current __device__ double z_inf(double(vm)) { double out; out = 1 / (1 + exp(-0.7 *(vm + 30))); return out; } /* extern double dt, *iSynap; stateVar = [vm, n, z, h] z - gating variable of the adaptation current */ __device__ void derivs(double t, double stateVar[], double dydx[], double isynap, double ibg, double iff) { double cur = 0.0; unsigned int kNeuron = threadIdx.x + blockDim.x * blockIdx.x; double bgPrefactor = 1.0, iffPrefactor = 1.0; if(kNeuron < N_NEURONS) { /* cur = 0.1 * sqrt(K);*/ /*if((kNeuron == 0 & t >= 30 & t <= 35) | (kNeuron == 1 & t >= 280 & t <= 285)) {cur = 10;} else {cur = 0.0;}*/ /* if(kNeuron >= 13520) { cur = 3.0; }*/ if (kNeuron < NE) { dydx[0] = 1/Cm * (cur - G_Na * pow(m_inf(stateVar[0]), 3) * stateVar[3] * (stateVar[0] - E_Na) - G_K * pow(stateVar[1], 4) * (stateVar[0] - E_K) - G_L_E * (stateVar[0] - E_L) - G_adapt * stateVar[2] * (stateVar[0] - E_K) + isynap + bgPrefactor * ibg + iffPrefactor * iff); } else { dydx[0] = 1/Cm * (cur - G_Na * pow(m_inf(stateVar[0]), 3) * stateVar[3] * (stateVar[0] - E_Na) - G_K * pow(stateVar[1], 4) * (stateVar[0] - E_K) - G_L_I * (stateVar[0] - E_L) - 0.0 * G_adapt * stateVar[2] * (stateVar[0] - E_K) + isynap + bgPrefactor * ibg + iffPrefactor * iff); } dydx[1] = alpha_n(stateVar[0]) * (1 - stateVar[1]) - beta_n(stateVar[0]) * stateVar[1]; dydx[2] = 1 / Tau_adapt * (z_inf(stateVar[0]) - stateVar[2]); dydx[3] = alpha_h(stateVar[0]) * (1 - stateVar[3]) - beta_h(stateVar[0]) * stateVar[3]; } } #endif
6848c24b5ef36fcac09f1fc42e149df50dc1759c.cu
#ifndef _DERIVS_ #define _DERIVS_ #include "globalVars.h" #include "devFunctionProtos.h" #include <cuda.h> __device__ double alpha_n(double vm); __device__ double alpha_m(double vm); __device__ double alpha_h(double vm); __device__ double beta_n(double vm); __device__ double beta_m(double vm); __device__ double beta_h(double vm); __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __device__ double alpha_n(double vm) { double out; if(vm != -34) { out = 0.1 * (vm + 34) / (1 - exp(-0.1 * (vm + 34))); } else { out = 0.1; } return out; } __device__ double beta_n(double vm) { double out; out = 1.25 * exp(- (vm + 44) / 80); return out; } __device__ double alpha_m(double vm) { double out; if(vm != -30) { out = 0.1 * (vm + 30) / (1 - exp(-0.1 * (vm + 30))); } else { out = 1; } return out; } __device__ double beta_m(double vm) { double out; out = 4 * exp(-(vm + 55) / 18); return out; } __device__ double alpha_h(double vm) { double out; out = 0.7 * exp(- (vm + 44) / 20); return out; } __device__ double beta_h(double vm) { double out; out = 10 / (exp(-0.1 * (vm + 14)) + 1); return out; } __device__ double m_inf(double vm) { double out, temp; temp = alpha_m(vm); out = temp / (temp + beta_m(vm)); return out; } //z is the gating varible of the adaptation current __device__ double z_inf(double(vm)) { double out; out = 1 / (1 + exp(-0.7 *(vm + 30))); return out; } /* extern double dt, *iSynap; stateVar = [vm, n, z, h] z - gating variable of the adaptation current */ __device__ void derivs(double t, double stateVar[], double dydx[], double isynap, double ibg, double iff) { double cur = 0.0; unsigned int kNeuron = threadIdx.x + blockDim.x * blockIdx.x; double bgPrefactor = 1.0, iffPrefactor = 1.0; if(kNeuron < N_NEURONS) { /* cur = 0.1 * sqrt(K);*/ /*if((kNeuron == 0 & t >= 30 & t <= 35) | (kNeuron == 1 & t >= 280 & t <= 285)) {cur = 10;} else {cur = 0.0;}*/ /* if(kNeuron >= 13520) { cur = 3.0; }*/ if (kNeuron < NE) { dydx[0] = 1/Cm * (cur - G_Na * pow(m_inf(stateVar[0]), 3) * stateVar[3] * (stateVar[0] - E_Na) - G_K * pow(stateVar[1], 4) * (stateVar[0] - E_K) - G_L_E * (stateVar[0] - E_L) - G_adapt * stateVar[2] * (stateVar[0] - E_K) + isynap + bgPrefactor * ibg + iffPrefactor * iff); } else { dydx[0] = 1/Cm * (cur - G_Na * pow(m_inf(stateVar[0]), 3) * stateVar[3] * (stateVar[0] - E_Na) - G_K * pow(stateVar[1], 4) * (stateVar[0] - E_K) - G_L_I * (stateVar[0] - E_L) - 0.0 * G_adapt * stateVar[2] * (stateVar[0] - E_K) + isynap + bgPrefactor * ibg + iffPrefactor * iff); } dydx[1] = alpha_n(stateVar[0]) * (1 - stateVar[1]) - beta_n(stateVar[0]) * stateVar[1]; dydx[2] = 1 / Tau_adapt * (z_inf(stateVar[0]) - stateVar[2]); dydx[3] = alpha_h(stateVar[0]) * (1 - stateVar[3]) - beta_h(stateVar[0]) * stateVar[3]; } } #endif
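Editor's note: for readers cross-checking the rate functions in this record, derivs integrates standard Hodgkin-Huxley-style gating dynamics plus an adaptation current. In the code's notation (V = stateVar[0], n = stateVar[1], z = stateVar[2], h = stateVar[3]):

    dn/dt = \alpha_n(V)\,(1 - n) - \beta_n(V)\,n
    dh/dt = \alpha_h(V)\,(1 - h) - \beta_h(V)\,h
    dz/dt = (z_\infty(V) - z) / \tau_{adapt},  with  z_\infty(V) = 1 / (1 + e^{-0.7\,(V + 30)})

with the sodium activation treated as instantaneous, m = m_\infty(V) = \alpha_m(V) / (\alpha_m(V) + \beta_m(V)), exactly as the m_inf() call in the voltage equation implements.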
f475e9e03e452a748583eb2aaea123dc434a404d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/shift_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ShiftForward(const int nthreads, int N, int C, int H, int W, const Dtype* U, Dtype* V, const int direction, const int displacement) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % W; const int s = (index / W) % H; const int j = (index / (W * H)) % C; const int i = index / (W * H * C); int bt = t; int bs = s; if(direction == 0){ bt = t + displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bt >= 0 & bt < W) ? U[i * (C * H * W) + j * (H * W) + W * s + bt] : 0; } else{ bs = s + displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bs >= 0 & bs < H) ? U[i * (C * H * W) + j * (H * W) + W * bs + t] : 0; } } } template <typename Dtype> void ShiftLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const int direction = this->layer_param().shift_param().direction(); const int displacement = this->layer_param().shift_param().displacement(); hipLaunchKernelGGL(( ShiftForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(), bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), direction, displacement); } template <typename Dtype> __global__ void ShiftBackward(const int nthreads, int N, int C, int H, int W, const Dtype* U, Dtype* V, const int direction, const int displacement) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % W; const int s = (index / W) % H; const int j = (index / (W * H)) % C; const int i = index / (W * H * C); int bt = t; int bs = s; // U: top_diff // V: bottom_diff if(direction == 0){ bt = t - displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bt >= 0 & bt < W) ? U[i * (C * H * W) + j * (H * W) + W * s + bt] : 0; } else{ bs = s - displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bs >= 0 & bs < H) ? U[i * (C * H * W) + j * (H * W) + W * bs + t] : 0; } } } template <typename Dtype> void ShiftLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int count = top[0]->count(); const int direction = this->layer_param().shift_param().direction(); const int displacement = this->layer_param().shift_param().displacement(); hipLaunchKernelGGL(( ShiftBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(), top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(), direction, displacement); } INSTANTIATE_LAYER_GPU_FUNCS(ShiftLayer); } // namespace caffe
f475e9e03e452a748583eb2aaea123dc434a404d.cu
#include <cfloat> #include <vector> #include "caffe/layers/shift_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ShiftForward(const int nthreads, int N, int C, int H, int W, const Dtype* U, Dtype* V, const int direction, const int displacement) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % W; const int s = (index / W) % H; const int j = (index / (W * H)) % C; const int i = index / (W * H * C); int bt = t; int bs = s; if(direction == 0){ bt = t + displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bt >= 0 & bt < W) ? U[i * (C * H * W) + j * (H * W) + W * s + bt] : 0; } else{ bs = s + displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bs >= 0 & bs < H) ? U[i * (C * H * W) + j * (H * W) + W * bs + t] : 0; } } } template <typename Dtype> void ShiftLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const int direction = this->layer_param().shift_param().direction(); const int displacement = this->layer_param().shift_param().displacement(); ShiftForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(), bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), direction, displacement); } template <typename Dtype> __global__ void ShiftBackward(const int nthreads, int N, int C, int H, int W, const Dtype* U, Dtype* V, const int direction, const int displacement) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % W; const int s = (index / W) % H; const int j = (index / (W * H)) % C; const int i = index / (W * H * C); int bt = t; int bs = s; // U: top_diff // V: bottom_diff if(direction == 0){ bt = t - displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bt >= 0 & bt < W) ? U[i * (C * H * W) + j * (H * W) + W * s + bt] : 0; } else{ bs = s - displacement; // displacement > 0 V[i * (C * H * W) + j * (H * W) + W * s + t] = (bs >= 0 & bs < H) ? U[i * (C * H * W) + j * (H * W) + W * bs + t] : 0; } } } template <typename Dtype> void ShiftLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int count = top[0]->count(); const int direction = this->layer_param().shift_param().direction(); const int displacement = this->layer_param().shift_param().displacement(); ShiftBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(), top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff(), direction, displacement); } INSTANTIATE_LAYER_GPU_FUNCS(ShiftLayer); } // namespace caffe
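Editor's note, a concrete example of the index bookkeeping in this record: with direction = 0 and displacement = 1, the forward kernel maps a width-4 row [a, b, c, d] to [b, c, d, 0] (reads past the boundary become zero), while the backward kernel shifts the incoming gradient the opposite way, so top gradients [g0, g1, g2, g3] land in the bottom buffer as [0, g0, g1, g2]; direction != 0 applies the same shift along the height axis instead.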
31bbda353b7146fb9ad57be61f91e8576de5ed60.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define dim 3

__global__ void vector_add(int *a, int *b, int *c)
{
    /* calculate the global index using blockIdx.x, blockDim.x, threadIdx.x */
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    /* guard against out-of-bounds accesses: the launch below uses 10 threads
       for dim * dim = 9 elements */
    if (index < dim * dim)
        c[index] = a[index] + b[index];
}

int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;

    // since we will be sending an array to represent the matrix.
    int size = dim * dim * sizeof( int );

    /* allocate space for device copies of a, b, c */
    hipMalloc( (void **) &d_a, size );
    hipMalloc( (void **) &d_b, size );
    hipMalloc( (void **) &d_c, size );

    /* allocate space for host copies of a, b, c and setup input values */
    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );

    for( int i = 0; i < dim * dim; i++ )
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }

    printf("A and B are:\n");
    for(int i = 0; i < dim * dim; i++)
    {
        if(i % dim == 0) printf("\n");
        printf("%d ", a[i]);
    }

    /* copy inputs to device */
    hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
    hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );

    dim3 dimBlock( 10, 1 );
    dim3 dimGrid( 1, 1 );

    hipLaunchKernelGGL(( vector_add), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c);

    /* copy result back to host */
    hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );

    printf("\n\nTheir sum =\n");
    for(int i = 0; i < dim * dim; i++)
    {
        if(i % dim == 0) printf("\n");
        printf("%d ", c[i]);
    }

    free(a);
    free(b);
    free(c);
    hipFree( d_a );
    hipFree( d_b );
    hipFree( d_c );

    return 0;
}
31bbda353b7146fb9ad57be61f91e8576de5ed60.cu
#include <stdio.h>

#define dim 3

__global__ void vector_add(int *a, int *b, int *c)
{
    /* calculate the global index using blockIdx.x, blockDim.x, threadIdx.x */
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    /* guard against out-of-bounds accesses: the launch below uses 10 threads
       for dim * dim = 9 elements */
    if (index < dim * dim)
        c[index] = a[index] + b[index];
}

int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;

    // since we will be sending an array to represent the matrix.
    int size = dim * dim * sizeof( int );

    /* allocate space for device copies of a, b, c */
    cudaMalloc( (void **) &d_a, size );
    cudaMalloc( (void **) &d_b, size );
    cudaMalloc( (void **) &d_c, size );

    /* allocate space for host copies of a, b, c and setup input values */
    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );

    for( int i = 0; i < dim * dim; i++ )
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }

    printf("A and B are:\n");
    for(int i = 0; i < dim * dim; i++)
    {
        if(i % dim == 0) printf("\n");
        printf("%d ", a[i]);
    }

    /* copy inputs to device */
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );

    dim3 dimBlock( 10, 1 );
    dim3 dimGrid( 1, 1 );

    vector_add<<<dimGrid, dimBlock>>>( d_a, d_b, d_c );

    /* copy result back to host */
    cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );

    printf("\n\nTheir sum =\n");
    for(int i = 0; i < dim * dim; i++)
    {
        if(i % dim == 0) printf("\n");
        printf("%d ", c[i]);
    }

    free(a);
    free(b);
    free(c);
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );

    return 0;
}
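Editor's note on the launch configuration in this example: dimBlock is 10 threads wide while the matrix holds dim * dim = 9 elements, so the bounds check inside vector_add is what keeps the spare thread from writing past the end of the arrays. A common alternative, sketched below with an assumed block size, is to size the grid by ceiling division so the thread count tracks the element count.

// Sketch: ceiling-division launch sizing for vector_add (the block size is an assumption).
int n = dim * dim;                                          // 9 elements
int threadsPerBlock = 128;
int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;   // = 1 for n = 9
vector_add<<<blocks, threadsPerBlock>>>(d_a, d_b, d_c);     // the kernel's bounds check still applies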
628356641df12eda46eb67904cb804aec126de12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dynet/cuda.h" #include "dynet/gpu-ops.h" #include "dynet/gpu-kernels.h" #include "dynet/functors.h" namespace dynet { namespace gpu { // CUDA kernel. Each thread takes care of one element of c __global__ void ker_dense_to_sparse_assign(int n, const unsigned int *idx, float *src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) trg[idx[id]] = src[id]; } void dense_to_sparse_assign(int n, const unsigned int *idx, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size) hipLaunchKernelGGL(( ker_dense_to_sparse_assign), dim3(tb.first), dim3(tb.second), 0, 0, min(total_size, n-curr_pos), idx+curr_pos, src+curr_pos, trg); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_sparse_to_dense_assign(int n, const unsigned int *idx, float *src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) trg[id] = src[idx[id]]; } void sparse_to_dense_assign(int n, const unsigned int *idx, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size) hipLaunchKernelGGL(( ker_sparse_to_dense_assign), dim3(tb.first), dim3(tb.second), 0, 0, min(total_size, n-curr_pos), idx+curr_pos, src, trg+curr_pos); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_dense_to_sparse_subtract(int n, const unsigned int *idx, float *src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) atomicAdd(trg + idx[id], -src[id]); } void dense_to_sparse_subtract(int n, const unsigned int *idx, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size) hipLaunchKernelGGL(( ker_dense_to_sparse_subtract), dim3(tb.first), dim3(tb.second), 0, 0, min(total_size, n-curr_pos), idx+curr_pos, src+curr_pos, trg); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_sparse_to_dense_block_assign_and_multiply(int n, const unsigned *idx, int bsize, float mult, float* src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n*bsize) trg[id] = src[idx[id/bsize]*bsize+id%bsize] * mult; } void sparse_to_dense_block_assign_and_multiply(int n, const unsigned *idx, int bsize, float mult, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n*bsize); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size/bsize) hipLaunchKernelGGL(( ker_sparse_to_dense_block_assign_and_multiply), dim3(tb.first), dim3(tb.second), 0, 0, min(total_size/bsize, n-curr_pos), idx+curr_pos, bsize, mult, src, trg+curr_pos*bsize); } } // CUDA kernel. Each thread takes care of one row copy. 
__global__ void ker_parallel_memcpy(int num_seqs, float **src, float **trg, float **len) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; int seq_id = id % num_seqs; int i = id / num_seqs; if (i < (unsigned long)len[seq_id]) trg[seq_id][i] = src[seq_id][i]; __syncthreads(); } void parallel_memcpy(int num_seqs, int max_len, float **src, float **trg, float **len) { if(num_seqs > 0) { auto tb = SizeToBlockThreadPair(num_seqs*max_len); hipLaunchKernelGGL(( ker_parallel_memcpy), dim3(tb.first), dim3(tb.second), 0, 0, num_seqs, src, trg, len); } } // CUDA kernel. Each thread takes care of one row copy. __global__ void ker_parallel_accumulate(int num_seqs, float **src, float **trg, float **len) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; int seq_id = id % num_seqs; int i = id / num_seqs; if (i < (unsigned long)len[seq_id]) atomicAdd(&trg[seq_id][i], src[seq_id][i]); __syncthreads(); } void parallel_accumulate(int num_seqs, int max_len, float **src, float **trg, float **len) { if(num_seqs > 0) { auto tb = SizeToBlockThreadPair(num_seqs*max_len); hipLaunchKernelGGL(( ker_parallel_accumulate), dim3(tb.first), dim3(tb.second), 0, 0, num_seqs, src, trg, len); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_dense_to_sparse_block_add(int n, const unsigned *idx, int bsize, float* src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n*bsize) atomicAdd(trg + idx[id/bsize]*bsize+id%bsize, src[id]); } void dense_to_sparse_block_add(int n, const unsigned *idx, int bsize, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n*bsize); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size/bsize) hipLaunchKernelGGL(( ker_dense_to_sparse_block_add), dim3(tb.first), dim3(tb.second), 0, 0, min(total_size/bsize, n-curr_pos), idx+curr_pos, bsize, src+curr_pos*bsize, trg); } } } // namespace gpu } // namespace dynet
628356641df12eda46eb67904cb804aec126de12.cu
#include "dynet/cuda.h" #include "dynet/gpu-ops.h" #include "dynet/gpu-kernels.h" #include "dynet/functors.h" namespace dynet { namespace gpu { // CUDA kernel. Each thread takes care of one element of c __global__ void ker_dense_to_sparse_assign(int n, const unsigned int *idx, float *src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) trg[idx[id]] = src[id]; } void dense_to_sparse_assign(int n, const unsigned int *idx, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size) ker_dense_to_sparse_assign<<<tb.first, tb.second>>>(min(total_size, n-curr_pos), idx+curr_pos, src+curr_pos, trg); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_sparse_to_dense_assign(int n, const unsigned int *idx, float *src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) trg[id] = src[idx[id]]; } void sparse_to_dense_assign(int n, const unsigned int *idx, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size) ker_sparse_to_dense_assign<<<tb.first, tb.second>>>(min(total_size, n-curr_pos), idx+curr_pos, src, trg+curr_pos); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_dense_to_sparse_subtract(int n, const unsigned int *idx, float *src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) atomicAdd(trg + idx[id], -src[id]); } void dense_to_sparse_subtract(int n, const unsigned int *idx, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size) ker_dense_to_sparse_subtract<<<tb.first, tb.second>>>(min(total_size, n-curr_pos), idx+curr_pos, src+curr_pos, trg); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_sparse_to_dense_block_assign_and_multiply(int n, const unsigned *idx, int bsize, float mult, float* src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n*bsize) trg[id] = src[idx[id/bsize]*bsize+id%bsize] * mult; } void sparse_to_dense_block_assign_and_multiply(int n, const unsigned *idx, int bsize, float mult, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n*bsize); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size/bsize) ker_sparse_to_dense_block_assign_and_multiply<<<tb.first, tb.second>>>(min(total_size/bsize, n-curr_pos), idx+curr_pos, bsize, mult, src, trg+curr_pos*bsize); } } // CUDA kernel. Each thread takes care of one row copy. 
__global__ void ker_parallel_memcpy(int num_seqs, float **src, float **trg, float **len) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; int seq_id = id % num_seqs; int i = id / num_seqs; if (i < (unsigned long)len[seq_id]) trg[seq_id][i] = src[seq_id][i]; __syncthreads(); } void parallel_memcpy(int num_seqs, int max_len, float **src, float **trg, float **len) { if(num_seqs > 0) { auto tb = SizeToBlockThreadPair(num_seqs*max_len); ker_parallel_memcpy<<<tb.first, tb.second>>>(num_seqs, src, trg, len); } } // CUDA kernel. Each thread takes care of one row copy. __global__ void ker_parallel_accumulate(int num_seqs, float **src, float **trg, float **len) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; int seq_id = id % num_seqs; int i = id / num_seqs; if (i < (unsigned long)len[seq_id]) atomicAdd(&trg[seq_id][i], src[seq_id][i]); __syncthreads(); } void parallel_accumulate(int num_seqs, int max_len, float **src, float **trg, float **len) { if(num_seqs > 0) { auto tb = SizeToBlockThreadPair(num_seqs*max_len); ker_parallel_accumulate<<<tb.first, tb.second>>>(num_seqs, src, trg, len); } } // CUDA kernel. Each thread takes care of one element of c __global__ void ker_dense_to_sparse_block_add(int n, const unsigned *idx, int bsize, float* src, float *trg) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n*bsize) atomicAdd(trg + idx[id/bsize]*bsize+id%bsize, src[id]); } void dense_to_sparse_block_add(int n, const unsigned *idx, int bsize, float *src, float *trg) { if(n > 0) { auto tb = SizeToBlockThreadPair(n*bsize); int total_size = tb.first*tb.second; for(int curr_pos = 0; curr_pos < n; curr_pos += total_size/bsize) ker_dense_to_sparse_block_add<<<tb.first, tb.second>>>(min(total_size/bsize, n-curr_pos), idx+curr_pos, bsize, src+curr_pos*bsize, trg); } } } // namespace gpu } // namespace dynet
63cea77fd3665cc28b0ecabb92b9fccc4e9f46cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/extrema.h> #include <thrust/execution_policy.h> template<typename Iterator, typename Iterator2> __global__ void min_element_kernel(Iterator first, Iterator last, Iterator2 result) { *result = thrust::min_element(thrust::seq, first, last); } template<typename Iterator, typename BinaryPredicate, typename Iterator2> __global__ void min_element_kernel(Iterator first, Iterator last, BinaryPredicate pred, Iterator2 result) { *result = thrust::min_element(thrust::seq, first, last, pred); } template<typename T> void TestMinElementDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iter_type; thrust::device_vector<iter_type> d_result(1); typename thrust::host_vector<T>::iterator h_min = thrust::min_element(h_data.begin(), h_data.end()); hipLaunchKernelGGL(( min_element_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), d_result.begin()); ASSERT_EQUAL(h_min - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); typename thrust::host_vector<T>::iterator h_max = thrust::min_element(h_data.begin(), h_data.end(), thrust::greater<T>()); hipLaunchKernelGGL(( min_element_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), thrust::greater<T>(), d_result.begin()); ASSERT_EQUAL(h_max - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); } DECLARE_VARIABLE_UNITTEST(TestMinElementDeviceSeq);
63cea77fd3665cc28b0ecabb92b9fccc4e9f46cb.cu
#include <unittest/unittest.h> #include <thrust/extrema.h> #include <thrust/execution_policy.h> template<typename Iterator, typename Iterator2> __global__ void min_element_kernel(Iterator first, Iterator last, Iterator2 result) { *result = thrust::min_element(thrust::seq, first, last); } template<typename Iterator, typename BinaryPredicate, typename Iterator2> __global__ void min_element_kernel(Iterator first, Iterator last, BinaryPredicate pred, Iterator2 result) { *result = thrust::min_element(thrust::seq, first, last, pred); } template<typename T> void TestMinElementDeviceSeq(const size_t n) { thrust::host_vector<T> h_data = unittest::random_samples<T>(n); thrust::device_vector<T> d_data = h_data; typedef typename thrust::device_vector<T>::iterator iter_type; thrust::device_vector<iter_type> d_result(1); typename thrust::host_vector<T>::iterator h_min = thrust::min_element(h_data.begin(), h_data.end()); min_element_kernel<<<1,1>>>(d_data.begin(), d_data.end(), d_result.begin()); ASSERT_EQUAL(h_min - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); typename thrust::host_vector<T>::iterator h_max = thrust::min_element(h_data.begin(), h_data.end(), thrust::greater<T>()); min_element_kernel<<<1,1>>>(d_data.begin(), d_data.end(), thrust::greater<T>(), d_result.begin()); ASSERT_EQUAL(h_max - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); } DECLARE_VARIABLE_UNITTEST(TestMinElementDeviceSeq);
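Editor's note: the extrema API exercised by this test is just as usable from ordinary host code; a minimal stand-alone usage sketch (with made-up input values) follows.

// Host-side usage sketch for thrust::min_element -- not part of the unit test above.
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <cstdio>

int main()
{
    thrust::device_vector<int> d(5);
    d[0] = 3; d[1] = 7; d[2] = 1; d[3] = 9; d[4] = 4;

    // Returns an iterator to the smallest element on the device.
    thrust::device_vector<int>::iterator it = thrust::min_element(d.begin(), d.end());
    printf("min value %d at position %ld\n", (int)*it, (long)(it - d.begin()));
    return 0;
}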
421da0bf022895991598bc600cba9f2602e60902.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * @Author: Haozhe Xie * @Date: 2019-12-19 20:36:36 * @Last Modified by: Haozhe Xie * @Last Modified time: 2020-06-17 14:55:41 * @Email: [email protected] */ #include <torch/extension.h> #include <cmath> #include <cstdio> #include <cstdlib> #define CUDA_NUM_THREADS 512 // Computer the number of threads needed in GPU inline int get_n_threads(int n) { const int pow_2 = ::log(static_cast<float>(n)) / ::log(2.0); return max(min(1 << pow_2, CUDA_NUM_THREADS), 1); } __device__ int compute_index(int offset_x, int offset_y, int offset_z, int scale) { return offset_x * scale * scale + offset_y * scale + offset_z; } __global__ void cubic_feature_sampling_kernel( int scale, int neighborhood_size, int n_vertices, int n_pts, int n_cubic_channels, const float *__restrict__ ptcloud, const float *__restrict__ cubic_features, float *__restrict__ point_features, int *__restrict__ grid_pt_indexes) { int batch_index = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; int cub_scale = scale * scale * scale; ptcloud += batch_index * n_pts * 3; cubic_features += batch_index * n_cubic_channels * cub_scale; point_features += batch_index * n_pts * n_vertices * n_cubic_channels; grid_pt_indexes += batch_index * n_pts * n_vertices; for (int i = index; i < n_pts; i += stride) { float pt_x = ptcloud[i * 3 + 0]; float pt_y = ptcloud[i * 3 + 1]; float pt_z = ptcloud[i * 3 + 2]; int lower_x = ::floor(pt_x); int upper_x = ::ceil(pt_x); if (lower_x == upper_x) { upper_x += 1; } int lower_y = ::floor(pt_y); int upper_y = ::ceil(pt_y); if (lower_y == upper_y) { upper_y += 1; } int lower_z = ::floor(pt_z); int upper_z = ::ceil(pt_z); if (lower_z == upper_z) { upper_z += 1; } int ns = neighborhood_size - 1; int vertex_idx = 0; for (int j = lower_x - ns; j <= upper_x + ns; ++j) { for (int k = lower_y - ns; k <= upper_y + ns; ++k) { for (int m = lower_z - ns; m <= upper_z + ns; ++m) { if (j < 0 || j >= scale || k < 0 || k >= scale || m < 0 || m >= scale) { // Ignore points lies out of the grid grid_pt_indexes[i * n_vertices + vertex_idx++] = -1; } else { // Calcuating indexes for adjacent vertices grid_pt_indexes[i * n_vertices + vertex_idx++] = compute_index(j, k, m, scale); } } } } // Gather Features for (int j = 0; j < n_vertices; ++j) { for (int k = 0; k < n_cubic_channels; ++k) { int vertex_idx = grid_pt_indexes[i * n_vertices + j]; if (vertex_idx == -1) { continue; } int feature_idx = i * n_vertices * n_cubic_channels + j * n_cubic_channels + k; float feature_val = cubic_features[k * cub_scale + vertex_idx]; point_features[feature_idx] = feature_val; } } } } std::vector<torch::Tensor> cubic_feature_sampling_cuda_forward( int scale, int neighborhood_size, torch::Tensor ptcloud, torch::Tensor cubic_features, hipStream_t stream) { int batch_size = ptcloud.size(0); int n_pts = ptcloud.size(1); int n_cubic_channels = cubic_features.size(1); int n_vertices = ::pow(neighborhood_size * 2, 3); torch::Tensor point_features = torch::zeros({batch_size, n_pts, n_vertices, n_cubic_channels}, torch::CUDA(torch::kFloat)); torch::Tensor grid_pt_indexes = torch::zeros({batch_size, n_pts, n_vertices}, torch::CUDA(torch::kInt)); hipLaunchKernelGGL(( cubic_feature_sampling_kernel), dim3(batch_size), dim3(get_n_threads(n_pts)), 0, stream, scale, neighborhood_size, n_vertices, n_pts, n_cubic_channels, ptcloud.data_ptr<float>(), cubic_features.data_ptr<float>(), point_features.data_ptr<float>(), grid_pt_indexes.data_ptr<int>()); hipError_t err = 
hipGetLastError(); if (err != hipSuccess) { printf("Error in cubic_feature_sampling_cuda_forward: %s\n", hipGetErrorString(err)); } return {point_features, grid_pt_indexes}; } __global__ void cubic_feature_sampling_grad_kernel( int scale, int neighborhood_size, int n_vertices, int n_pts, int n_cubic_channels, const float *__restrict__ grad_point_features, const int *__restrict__ grid_pt_indexes, float *__restrict__ grad_ptcloud, float *__restrict__ grad_cubic_features) { int batch_index = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; int cub_scale = scale * scale * scale; grad_point_features += batch_index * n_pts * n_vertices * n_cubic_channels; grid_pt_indexes += batch_index * n_pts * n_vertices; grad_ptcloud += batch_index * n_pts * 3; grad_cubic_features += batch_index * n_cubic_channels * cub_scale; for (int i = index; i < n_pts; i += stride) { for (int j = 0; j < n_vertices; ++j) { int vertex_idx = grid_pt_indexes[i * n_vertices + j]; if (vertex_idx == -1) { continue; } for (int k = 0; k < n_cubic_channels; ++k) { int grad_idx = i * n_vertices * n_cubic_channels + j * n_cubic_channels + k; float grad_val = grad_point_features[grad_idx]; // Fix bugs: the gradients of ceil and floor functions are zeros. // Ref: https://github.com/tensorflow/tensorflow/issues/897 // atomicAdd(&(grad_ptcloud[i * 3 + 0]), grad_val); // atomicAdd(&(grad_ptcloud[i * 3 + 1]), grad_val); // atomicAdd(&(grad_ptcloud[i * 3 + 2]), grad_val); atomicAdd(&(grad_cubic_features[k * cub_scale + vertex_idx]), grad_val); } } } } std::vector<torch::Tensor> cubic_feature_sampling_cuda_backward( int scale, int neighborhood_size, torch::Tensor grad_point_features, torch::Tensor grid_pt_indexes, hipStream_t stream) { int batch_size = grad_point_features.size(0); int n_cubic_channels = grad_point_features.size(3); int n_pts = grid_pt_indexes.size(1); int n_vertices = ::pow(neighborhood_size * 2, 3); torch::Tensor grad_ptcloud = torch::zeros({batch_size, n_pts, 3}, torch::CUDA(torch::kFloat)); torch::Tensor grad_cubic_features = torch::zeros({batch_size, n_cubic_channels, scale, scale, scale}, torch::CUDA(torch::kFloat)); hipLaunchKernelGGL(( cubic_feature_sampling_grad_kernel), dim3(batch_size), dim3(get_n_threads(n_pts)), 0, stream, scale, neighborhood_size, n_vertices, n_pts, n_cubic_channels, grad_point_features.data_ptr<float>(), grid_pt_indexes.data_ptr<int>(), grad_ptcloud.data_ptr<float>(), grad_cubic_features.data_ptr<float>()); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error in cubic_feature_sampling_cuda_backward: %s\n", hipGetErrorString(err)); } return {grad_ptcloud, grad_cubic_features}; }
421da0bf022895991598bc600cba9f2602e60902.cu
/* * @Author: Haozhe Xie * @Date: 2019-12-19 20:36:36 * @Last Modified by: Haozhe Xie * @Last Modified time: 2020-06-17 14:55:41 * @Email: [email protected] */ #include <torch/extension.h> #include <cmath> #include <cstdio> #include <cstdlib> #define CUDA_NUM_THREADS 512 // Computer the number of threads needed in GPU inline int get_n_threads(int n) { const int pow_2 = std::log(static_cast<float>(n)) / std::log(2.0); return max(min(1 << pow_2, CUDA_NUM_THREADS), 1); } __device__ int compute_index(int offset_x, int offset_y, int offset_z, int scale) { return offset_x * scale * scale + offset_y * scale + offset_z; } __global__ void cubic_feature_sampling_kernel( int scale, int neighborhood_size, int n_vertices, int n_pts, int n_cubic_channels, const float *__restrict__ ptcloud, const float *__restrict__ cubic_features, float *__restrict__ point_features, int *__restrict__ grid_pt_indexes) { int batch_index = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; int cub_scale = scale * scale * scale; ptcloud += batch_index * n_pts * 3; cubic_features += batch_index * n_cubic_channels * cub_scale; point_features += batch_index * n_pts * n_vertices * n_cubic_channels; grid_pt_indexes += batch_index * n_pts * n_vertices; for (int i = index; i < n_pts; i += stride) { float pt_x = ptcloud[i * 3 + 0]; float pt_y = ptcloud[i * 3 + 1]; float pt_z = ptcloud[i * 3 + 2]; int lower_x = std::floor(pt_x); int upper_x = std::ceil(pt_x); if (lower_x == upper_x) { upper_x += 1; } int lower_y = std::floor(pt_y); int upper_y = std::ceil(pt_y); if (lower_y == upper_y) { upper_y += 1; } int lower_z = std::floor(pt_z); int upper_z = std::ceil(pt_z); if (lower_z == upper_z) { upper_z += 1; } int ns = neighborhood_size - 1; int vertex_idx = 0; for (int j = lower_x - ns; j <= upper_x + ns; ++j) { for (int k = lower_y - ns; k <= upper_y + ns; ++k) { for (int m = lower_z - ns; m <= upper_z + ns; ++m) { if (j < 0 || j >= scale || k < 0 || k >= scale || m < 0 || m >= scale) { // Ignore points lies out of the grid grid_pt_indexes[i * n_vertices + vertex_idx++] = -1; } else { // Calcuating indexes for adjacent vertices grid_pt_indexes[i * n_vertices + vertex_idx++] = compute_index(j, k, m, scale); } } } } // Gather Features for (int j = 0; j < n_vertices; ++j) { for (int k = 0; k < n_cubic_channels; ++k) { int vertex_idx = grid_pt_indexes[i * n_vertices + j]; if (vertex_idx == -1) { continue; } int feature_idx = i * n_vertices * n_cubic_channels + j * n_cubic_channels + k; float feature_val = cubic_features[k * cub_scale + vertex_idx]; point_features[feature_idx] = feature_val; } } } } std::vector<torch::Tensor> cubic_feature_sampling_cuda_forward( int scale, int neighborhood_size, torch::Tensor ptcloud, torch::Tensor cubic_features, cudaStream_t stream) { int batch_size = ptcloud.size(0); int n_pts = ptcloud.size(1); int n_cubic_channels = cubic_features.size(1); int n_vertices = std::pow(neighborhood_size * 2, 3); torch::Tensor point_features = torch::zeros({batch_size, n_pts, n_vertices, n_cubic_channels}, torch::CUDA(torch::kFloat)); torch::Tensor grid_pt_indexes = torch::zeros({batch_size, n_pts, n_vertices}, torch::CUDA(torch::kInt)); cubic_feature_sampling_kernel<<<batch_size, get_n_threads(n_pts), 0, stream>>>( scale, neighborhood_size, n_vertices, n_pts, n_cubic_channels, ptcloud.data_ptr<float>(), cubic_features.data_ptr<float>(), point_features.data_ptr<float>(), grid_pt_indexes.data_ptr<int>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in 
cubic_feature_sampling_cuda_forward: %s\n", cudaGetErrorString(err)); } return {point_features, grid_pt_indexes}; } __global__ void cubic_feature_sampling_grad_kernel( int scale, int neighborhood_size, int n_vertices, int n_pts, int n_cubic_channels, const float *__restrict__ grad_point_features, const int *__restrict__ grid_pt_indexes, float *__restrict__ grad_ptcloud, float *__restrict__ grad_cubic_features) { int batch_index = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; int cub_scale = scale * scale * scale; grad_point_features += batch_index * n_pts * n_vertices * n_cubic_channels; grid_pt_indexes += batch_index * n_pts * n_vertices; grad_ptcloud += batch_index * n_pts * 3; grad_cubic_features += batch_index * n_cubic_channels * cub_scale; for (int i = index; i < n_pts; i += stride) { for (int j = 0; j < n_vertices; ++j) { int vertex_idx = grid_pt_indexes[i * n_vertices + j]; if (vertex_idx == -1) { continue; } for (int k = 0; k < n_cubic_channels; ++k) { int grad_idx = i * n_vertices * n_cubic_channels + j * n_cubic_channels + k; float grad_val = grad_point_features[grad_idx]; // Fix bugs: the gradients of ceil and floor functions are zeros. // Ref: https://github.com/tensorflow/tensorflow/issues/897 // atomicAdd(&(grad_ptcloud[i * 3 + 0]), grad_val); // atomicAdd(&(grad_ptcloud[i * 3 + 1]), grad_val); // atomicAdd(&(grad_ptcloud[i * 3 + 2]), grad_val); atomicAdd(&(grad_cubic_features[k * cub_scale + vertex_idx]), grad_val); } } } } std::vector<torch::Tensor> cubic_feature_sampling_cuda_backward( int scale, int neighborhood_size, torch::Tensor grad_point_features, torch::Tensor grid_pt_indexes, cudaStream_t stream) { int batch_size = grad_point_features.size(0); int n_cubic_channels = grad_point_features.size(3); int n_pts = grid_pt_indexes.size(1); int n_vertices = std::pow(neighborhood_size * 2, 3); torch::Tensor grad_ptcloud = torch::zeros({batch_size, n_pts, 3}, torch::CUDA(torch::kFloat)); torch::Tensor grad_cubic_features = torch::zeros({batch_size, n_cubic_channels, scale, scale, scale}, torch::CUDA(torch::kFloat)); cubic_feature_sampling_grad_kernel<<<batch_size, get_n_threads(n_pts), 0, stream>>>( scale, neighborhood_size, n_vertices, n_pts, n_cubic_channels, grad_point_features.data_ptr<float>(), grid_pt_indexes.data_ptr<int>(), grad_ptcloud.data_ptr<float>(), grad_cubic_features.data_ptr<float>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in cubic_feature_sampling_cuda_backward: %s\n", cudaGetErrorString(err)); } return {grad_ptcloud, grad_cubic_features}; }
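Editor's note, a quick worked example of the sizes used in this record: n_vertices = (2 * neighborhood_size)^3, so with neighborhood_size = 1 the offset ns is 0 and each point gathers features from the 2 x 2 x 2 = 8 grid vertices at the corners of its cell, while neighborhood_size = 2 widens the window by one vertex on every side, giving 4^3 = 64 vertices per point. Vertices that fall outside the scale x scale x scale grid are recorded as -1 in grid_pt_indexes and skipped in both the forward gather and the backward scatter.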
7802c1c9d32567fc4690a15e9df3d6bd47daee78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <prof.cu> #include <stdio.h> #include <stdlib.h> #include <cutil_inline.h> #include "clock_kernel.cu" // This example shows how to use the clock function to measure the performance of // a kernel accurately. // // Blocks are executed in parallel and out of order. Since there's no synchronization // mechanism between blocks, we measure the clock once for each block. The clock // samples are written to device memory. #define NUM_BLOCKS 64 #define NUM_THREADS 256 // It's interesting to change the number of blocks and the number of threads to // understand how to keep the hardware busy. // // Here are some numbers I get on my G80: // blocks - clocks // 1 - 3096 // 8 - 3232 // 16 - 3364 // 32 - 4615 // 64 - 9981 // // With less than 16 blocks some of the multiprocessors of the device are idle. With // more than 16 you are using all the multiprocessors, but there's only one block per // multiprocessor and that doesn't allow you to hide the latency of the memory. With // more than 32 the speed scales linearly. int main(int argc, char** argv) { GpuProfiling::initProf(); // use command-line specified CUDA device, otherwise use device with highest Gflops/s if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device")) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); float * dinput = NULL; float * doutput = NULL; clock_t * dtimer = NULL; clock_t timer[NUM_BLOCKS * 2]; float input[NUM_THREADS * 2]; for (int i = 0; i < NUM_THREADS * 2; i++) { input[i] = (float)i; } cutilSafeCall(hipMalloc((void**)&dinput, sizeof(float) * NUM_THREADS * 2)); cutilSafeCall(hipMalloc((void**)&doutput, sizeof(float) * NUM_BLOCKS)); cutilSafeCall(hipMalloc((void**)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2)); cutilSafeCall(hipMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, hipMemcpyHostToDevice)); GpuProfiling::prepareProfiling( NUM_BLOCKS, NUM_THREADS); hipLaunchKernelGGL(( timedReduction), dim3(NUM_BLOCKS), dim3(NUM_THREADS), sizeof(float) * 2 * NUM_THREADS, 0, dinput, doutput, dtimer); GpuProfiling::addResults("timedReduction"); //cutilSafeCall(hipMemcpy(output, doutput, sizeof(float) * NUM_BLOCKS, hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, hipMemcpyDeviceToHost)); GpuProfiling::printResults(); cutilSafeCall(hipFree(dinput)); cutilSafeCall(hipFree(doutput)); cutilSafeCall(hipFree(dtimer)); // This test always passes. printf( "PASSED\n"); // Compute the difference between the last block end and the first block start. clock_t minStart = timer[0]; clock_t maxEnd = timer[NUM_BLOCKS]; for (int i = 1; i < NUM_BLOCKS; i++) { minStart = timer[i] < minStart ? timer[i] : minStart; maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd; } printf("time = %d\n", maxEnd - minStart); hipDeviceReset(); exit(0); cutilExit(argc, argv); }
7802c1c9d32567fc4690a15e9df3d6bd47daee78.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <prof.cu> #include <stdio.h> #include <stdlib.h> #include <cutil_inline.h> #include "clock_kernel.cu" // This example shows how to use the clock function to measure the performance of // a kernel accurately. // // Blocks are executed in parallel and out of order. Since there's no synchronization // mechanism between blocks, we measure the clock once for each block. The clock // samples are written to device memory. #define NUM_BLOCKS 64 #define NUM_THREADS 256 // It's interesting to change the number of blocks and the number of threads to // understand how to keep the hardware busy. // // Here are some numbers I get on my G80: // blocks - clocks // 1 - 3096 // 8 - 3232 // 16 - 3364 // 32 - 4615 // 64 - 9981 // // With less than 16 blocks some of the multiprocessors of the device are idle. With // more than 16 you are using all the multiprocessors, but there's only one block per // multiprocessor and that doesn't allow you to hide the latency of the memory. With // more than 32 the speed scales linearly. int main(int argc, char** argv) { GpuProfiling::initProf(); // use command-line specified CUDA device, otherwise use device with highest Gflops/s if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device")) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); float * dinput = NULL; float * doutput = NULL; clock_t * dtimer = NULL; clock_t timer[NUM_BLOCKS * 2]; float input[NUM_THREADS * 2]; for (int i = 0; i < NUM_THREADS * 2; i++) { input[i] = (float)i; } cutilSafeCall(cudaMalloc((void**)&dinput, sizeof(float) * NUM_THREADS * 2)); cutilSafeCall(cudaMalloc((void**)&doutput, sizeof(float) * NUM_BLOCKS)); cutilSafeCall(cudaMalloc((void**)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2)); cutilSafeCall(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, cudaMemcpyHostToDevice)); GpuProfiling::prepareProfiling( NUM_BLOCKS, NUM_THREADS); timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS>>>(dinput, doutput, dtimer); GpuProfiling::addResults("timedReduction"); //cutilSafeCall(cudaMemcpy(output, doutput, sizeof(float) * NUM_BLOCKS, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, cudaMemcpyDeviceToHost)); GpuProfiling::printResults(); cutilSafeCall(cudaFree(dinput)); cutilSafeCall(cudaFree(doutput)); cutilSafeCall(cudaFree(dtimer)); // This test always passes. printf( "PASSED\n"); // Compute the difference between the last block end and the first block start. clock_t minStart = timer[0]; clock_t maxEnd = timer[NUM_BLOCKS]; for (int i = 1; i < NUM_BLOCKS; i++) { minStart = timer[i] < minStart ? timer[i] : minStart; maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd; } printf("time = %d\n", maxEnd - minStart); cudaThreadExit(); exit(0); cutilExit(argc, argv); }
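Editor's note: the kernel itself lives in clock_kernel.cu, which is not part of this record, so the sketch below is only an assumed reconstruction of what a per-block timing kernel of this shape typically looks like: thread 0 samples clock() on entry and exit and stores the two samples in the first and second halves of the timer array, which is the layout the maxEnd - minStart computation in main() expects. The reduction body and all names here are assumptions, not the shipped clock_kernel.cu.

// Assumed sketch only -- the real timedReduction is defined in clock_kernel.cu.
__global__ void timed_reduction_sketch(const float *input, float *output, clock_t *timer)
{
    extern __shared__ float shared[];              // launch provides 2 * blockDim.x floats
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (tid == 0) timer[bid] = clock();            // block start sample

    shared[tid]              = input[tid];
    shared[tid + blockDim.x] = input[tid + blockDim.x];

    for (int d = blockDim.x; d > 0; d /= 2) {      // tree reduction for the minimum
        __syncthreads();
        if (tid < d) {
            float f0 = shared[tid];
            float f1 = shared[tid + d];
            if (f1 < f0) shared[tid] = f1;
        }
    }

    if (tid == 0) output[bid] = shared[0];
    __syncthreads();
    if (tid == 0) timer[bid + gridDim.x] = clock();   // block end sample
}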
a3b468e59f889755e2ac7c09565b0f345165b0e2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void matmulKernel(float* mat1,float* mat2, float* matP,int dim)
{
    int thread_x,thread_y,i;
    thread_x=blockIdx.x*blockDim.x+threadIdx.x;
    thread_y=blockIdx.y*blockDim.y+threadIdx.y;
    if(thread_x<dim&&thread_y<dim)
    {
        float P_value=0.;
        for(i=0;i<dim;i++)
        {
            P_value+=mat1[thread_y*dim+i]*mat2[i*dim+thread_x];
        }
        matP[thread_y*dim+thread_x]=P_value;
    }
}
a3b468e59f889755e2ac7c09565b0f345165b0e2.cu
#include "includes.h" __global__ void matmulKernel(float* mat1,float* mat2, float* matP,int dim) { int thread_x,thread_y,i; thread_x=blockIdx.x*blockDim.x+threadIdx.x; thread_y=blockIdx.y*blockDim.y+threadIdx.y; if(thread_x<dim&&thread_y<dim) { float P_value=0.; for(i=0;i<dim;i++) { P_value+=mat1[thread_y*dim+i]*mat2[i*dim+thread_x]; } matP[thread_y*dim+thread_x]=P_value; } }
119df2d4f269de2b7217c357dda54575f8a63ab9.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <boost/program_options.hpp> using namespace cv; using namespace boost::program_options; using namespace std; #define ERROR_HANDLE(ans) { gpuAssert((ans), __FILE__, __LINE__); } void gpuAssert(hipError_t cu_err, const char *file, int line) { if (cu_err != hipSuccess) { cerr << "GPU Assert : " << hipGetErrorString(cu_err) << " " << file << " " << line << endl; exit(EXIT_FAILURE); } } __global__ void cuColorToGray(uchar *d_out, uchar *d_in, const int width, const int height) { const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col < width && row < height) { uchar b = d_in[row * width * 3 + col * 3]; uchar g = d_in[row * width * 3 + col * 3 + 1]; uchar r = d_in[row * width * 3 + col * 3 + 2]; d_out[row * width + col] = (uchar)(r * 0.299f + g * 0.587f + b * 0.114f); } } Mat colorToGray(Mat &colorImg, size_t block_width, size_t block_height) { const int width = colorImg.cols, height = colorImg.rows; Mat grayImg = Mat::zeros(height, width, CV_8UC1); const int grayByte = width * height * sizeof(uchar); const int bgrByte = grayByte * 3; uchar *dImg, *dGrayImg; ERROR_HANDLE(hipMalloc(&dImg, bgrByte)); ERROR_HANDLE(hipMalloc(&dGrayImg, grayByte)); ERROR_HANDLE(hipMemcpy(dImg, colorImg.data, bgrByte, hipMemcpyHostToDevice)); const dim3 block(block_width, block_height); const dim3 grid(width / block.x + 1, height / block.y + 1); hipLaunchKernelGGL(( cuColorToGray), dim3(grid), dim3(block), 0, 0, dGrayImg, dImg, width, height); ERROR_HANDLE(hipMemcpy(grayImg.data, dGrayImg, grayByte, hipMemcpyDeviceToHost)); ERROR_HANDLE(hipFree(dImg)); ERROR_HANDLE(hipFree(dGrayImg)); return grayImg; } int main(int argc, char* argv[]) { // arg parser options_description desc("all options"); desc.add_options() ("help,h", "produce a help screen") ("image_path", value<string>(), "Image File Path") ("window_size", value<int>(), "Disply Window Size") ("block_width", value<uint>(), "CUDA Grid Block Width") ("block_height", value<uint>(), "CUDA Grid Block Height"); variables_map vm; store(parse_command_line(argc, argv, desc), vm); if (vm.count("help")) { cout << desc; return 1; } string filePath = ""; Mat img; int window_width = 800; uint block_width = 16; uint block_height = 16; if (vm.count("image_path")) { filePath = vm["image_path"].as<string>(); img = imread(filePath, CV_LOAD_IMAGE_COLOR); if (!img.data) { cout << "Invalid Image Path" << endl; return 0; } cout << "Image Load : " << filePath << endl; } if (vm.count("window_size")) { window_width = vm["window_size"].as<int>(); } if (vm.count("block_width")) { block_width = vm["block_width"].as<uint>(); } if (vm.count("block_height")) { block_height = vm["block_height"].as<uint>(); } // color to gray Mat grayImg = colorToGray(img, block_width, block_height); const int width = img.cols; const int height = img.rows; const int window_height = int(height * float(window_width) / width); namedWindow("COLOR_IMAGE", WINDOW_NORMAL); namedWindow("GRAY_IMAGE", WINDOW_NORMAL); resizeWindow("COLOR_IMAGE", window_width, window_height) ; resizeWindow("GRAY_IMAGE", window_width, window_height); imshow("COLOR_IMAGE", img); imshow("GRAY_IMAGE", grayImg); waitKey(0); return 0; }
119df2d4f269de2b7217c357dda54575f8a63ab9.cu
#include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <boost/program_options.hpp> using namespace cv; using namespace boost::program_options; using namespace std; #define ERROR_HANDLE(ans) { gpuAssert((ans), __FILE__, __LINE__); } void gpuAssert(cudaError_t cu_err, const char *file, int line) { if (cu_err != cudaSuccess) { cerr << "GPU Assert : " << cudaGetErrorString(cu_err) << " " << file << " " << line << endl; exit(EXIT_FAILURE); } } __global__ void cuColorToGray(uchar *d_out, uchar *d_in, const int width, const int height) { const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col < width && row < height) { uchar b = d_in[row * width * 3 + col * 3]; uchar g = d_in[row * width * 3 + col * 3 + 1]; uchar r = d_in[row * width * 3 + col * 3 + 2]; d_out[row * width + col] = (uchar)(r * 0.299f + g * 0.587f + b * 0.114f); } } Mat colorToGray(Mat &colorImg, size_t block_width, size_t block_height) { const int width = colorImg.cols, height = colorImg.rows; Mat grayImg = Mat::zeros(height, width, CV_8UC1); const int grayByte = width * height * sizeof(uchar); const int bgrByte = grayByte * 3; uchar *dImg, *dGrayImg; ERROR_HANDLE(cudaMalloc(&dImg, bgrByte)); ERROR_HANDLE(cudaMalloc(&dGrayImg, grayByte)); ERROR_HANDLE(cudaMemcpy(dImg, colorImg.data, bgrByte, cudaMemcpyHostToDevice)); const dim3 block(block_width, block_height); const dim3 grid(width / block.x + 1, height / block.y + 1); cuColorToGray<<<grid, block>>>(dGrayImg, dImg, width, height); ERROR_HANDLE(cudaMemcpy(grayImg.data, dGrayImg, grayByte, cudaMemcpyDeviceToHost)); ERROR_HANDLE(cudaFree(dImg)); ERROR_HANDLE(cudaFree(dGrayImg)); return grayImg; } int main(int argc, char* argv[]) { // arg parser options_description desc("all options"); desc.add_options() ("help,h", "produce a help screen") ("image_path", value<string>(), "Image File Path") ("window_size", value<int>(), "Disply Window Size") ("block_width", value<uint>(), "CUDA Grid Block Width") ("block_height", value<uint>(), "CUDA Grid Block Height"); variables_map vm; store(parse_command_line(argc, argv, desc), vm); if (vm.count("help")) { cout << desc; return 1; } string filePath = ""; Mat img; int window_width = 800; uint block_width = 16; uint block_height = 16; if (vm.count("image_path")) { filePath = vm["image_path"].as<string>(); img = imread(filePath, CV_LOAD_IMAGE_COLOR); if (!img.data) { cout << "Invalid Image Path" << endl; return 0; } cout << "Image Load : " << filePath << endl; } if (vm.count("window_size")) { window_width = vm["window_size"].as<int>(); } if (vm.count("block_width")) { block_width = vm["block_width"].as<uint>(); } if (vm.count("block_height")) { block_height = vm["block_height"].as<uint>(); } // color to gray Mat grayImg = colorToGray(img, block_width, block_height); const int width = img.cols; const int height = img.rows; const int window_height = int(height * float(window_width) / width); namedWindow("COLOR_IMAGE", WINDOW_NORMAL); namedWindow("GRAY_IMAGE", WINDOW_NORMAL); resizeWindow("COLOR_IMAGE", window_width, window_height) ; resizeWindow("GRAY_IMAGE", window_width, window_height); imshow("COLOR_IMAGE", img); imshow("GRAY_IMAGE", grayImg); waitKey(0); return 0; }
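The colorToGray pair illustrates that the error-handling types map one-to-one: cudaError_t, cudaSuccess, and cudaGetErrorString become hipError_t, hipSuccess, and hipGetErrorString, so the ERROR_HANDLE/gpuAssert wrapper keeps its shape while the host-side OpenCV and Boost code passes through untouched. A condensed sketch of the HIP-side check, with the names taken from the file above:

// Same macro as the CUDA version; only the error type and query function are renamed.
#define ERROR_HANDLE(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(hipError_t cu_err, const char *file, int line)
{
    if (cu_err != hipSuccess) {
        std::cerr << "GPU Assert : " << hipGetErrorString(cu_err)
                  << " " << file << " " << line << std::endl;
        exit(EXIT_FAILURE);
    }
}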
8dc623afaa37426144b9399a12c2bb75fd66b67f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

typedef unsigned uint32_t;

extern "C" {

__global__ void my_dot( float const * const a, float const * const b, float * const c, uint32_t const n )
{
    uint32_t const ix = blockDim.x * blockIdx.x + threadIdx.x;
    if( ix < n )
    {
        c[ix] = a[ix] + b[ix];
    }
}

}
8dc623afaa37426144b9399a12c2bb75fd66b67f.cu
typedef unsigned uint32_t;

extern "C" {

__global__ void my_dot( float const * const a, float const * const b, float * const c, uint32_t const n )
{
    uint32_t const ix = blockDim.x * blockIdx.x + threadIdx.x;
    if( ix < n )
    {
        c[ix] = a[ix] + b[ix];
    }
}

}
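The my_dot pair (like the matmul pair before it) is a case where the device code itself needs no translation: kernels that only use threadIdx, blockIdx, and blockDim compile unchanged under both toolchains, so hipify prepends its banner and the runtime header and leaves the body alone. A minimal sketch of the only lines added in such cases:

// Prepended by hipify at the top of the .hip file; the kernel body below these lines is identical to the .cu source.
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"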
e584958f67802bbc4fa6d83f90b1e61ba540aab6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file InitializeTracks.cu //---------------------------------------------------------------------------// #include "InitializeTracks.hh" #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <vector> #include "base/Atomics.hh" #include "base/DeviceVector.hh" #include "base/KernelParamCalculator.cuda.hh" #include "geometry/GeoTrackView.hh" #include "physics/base/ParticleTrackView.hh" #include "sim/SimTrackView.hh" namespace celeritas { namespace detail { namespace { //---------------------------------------------------------------------------// // HELPER CLASSES //---------------------------------------------------------------------------// struct IsEqual { size_type value; CELER_FUNCTION bool operator()(size_type x) const { return x == value; } }; //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// /*! * Initialize the track states on device. The track initializers are created * from either primary particles or secondaries. The new tracks are inserted * into empty slots (vacancies) in the track vector. */ __global__ void init_tracks_kernel(const StatePointers states, const ParamPointers params, const TrackInitializerPointers inits, size_type num_vacancies) { auto thread_id = KernelParamCalculator::thread_id().get(); if (thread_id < num_vacancies) { // Get the track initializer from the back of the vector. Since new // initializers are pushed to the back of the vector, these will be the // most recently added and therefore the ones that still might have a // parent they can copy the geometry state from. const TrackInitializer& init = inits.initializers[inits.initializers.size() - thread_id - 1]; // Index of the empty slot to create the new track in ThreadId slot_id( inits.vacancies[inits.vacancies.size() - thread_id - 1]); // Initialize the simulation state { SimTrackView sim(states.sim, slot_id); sim = init.sim; } // Initialize the particle physics data { ParticleTrackView particle( params.particle, states.particle, slot_id); particle = init.particle; } // Initialize the geometry { GeoTrackView geo(params.geo, states.geo, slot_id); if (thread_id < inits.parent.size()) { // Copy the geometry state from the parent for improved // performance TrackId::value_type parent_id = inits.parent[inits.parent.size() - thread_id - 1]; GeoTrackView parent( params.geo, states.geo, ThreadId{parent_id}); geo = {parent, init.geo.dir}; } else { // Initialize it from the position (more expensive) geo = init.geo; } } } } //---------------------------------------------------------------------------// /*! * Find empty slots in the track vector and count the number of secondaries * that survived cutoffs for each interaction. If the track is dead and * produced secondaries, fill the empty track slot with one of the secondaries. 
*/ __global__ void locate_alive_kernel(const StatePointers states, const ParamPointers params, const TrackInitializerPointers inits) { auto thread_id = KernelParamCalculator::thread_id(); if (thread_id < states.size()) { // Secondary to copy to the parent's track slot if the parent has died size_type secondary_id = flag_id(); // Count how many secondaries survived cutoffs for each track inits.secondary_counts[thread_id.get()] = 0; Interaction& result = states.interactions[thread_id.get()]; for (size_type i = 0; i < result.secondaries.size(); ++i) { if (result.secondaries[i]) { if (secondary_id == flag_id()) { secondary_id = i; } ++inits.secondary_counts[thread_id.get()]; } } SimTrackView sim(states.sim, thread_id); if (sim.alive()) { // The track is alive: mark this track slot as active inits.vacancies[thread_id.get()] = flag_id(); } else if (secondary_id != flag_id()) { // The track is dead and produced secondaries: fill the empty track // slot with the first secondary and mark the track slot as active // Calculate the track ID of the secondary // TODO: This is nondeterministic; we need to calculate the track // ID in a reproducible way. CELER_ASSERT(sim.event_id() < inits.track_counter.size()); TrackId::value_type track_id = atomic_add(&inits.track_counter[sim.event_id().get()], 1u); // Initialize the simulation state sim = {TrackId{track_id}, sim.track_id(), sim.event_id(), true}; // Initialize the particle state from the secondary Secondary& secondary = result.secondaries[secondary_id]; ParticleTrackView particle( params.particle, states.particle, thread_id); particle = {secondary.def_id, secondary.energy}; // Keep the parent's geometry state GeoTrackView geo(params.geo, states.geo, thread_id); geo = {geo, secondary.direction}; // Mark the secondary as processed and the track as active --inits.secondary_counts[thread_id.get()]; secondary = Secondary{}; inits.vacancies[thread_id.get()] = flag_id(); } else { // The track is dead and did not produce secondaries: store the // index so it can be used later to initialize a new track inits.vacancies[thread_id.get()] = thread_id.get(); } } } //---------------------------------------------------------------------------// /*! * Create track initializers on device from primary particles. */ __global__ void process_primaries_kernel(const Span<const Primary> primaries, const Span<TrackInitializer> initializers) { auto thread_id = KernelParamCalculator::thread_id(); if (thread_id < primaries.size()) { TrackInitializer& init = initializers[thread_id.get()]; const Primary& primary = primaries[thread_id.get()]; // Construct a track initializer from a primary particle init.sim.track_id = primary.track_id; init.sim.parent_id = TrackId{}; init.sim.event_id = primary.event_id; init.sim.alive = true; init.geo.pos = primary.position; init.geo.dir = primary.direction; init.particle.def_id = primary.def_id; init.particle.energy = primary.energy; } } //---------------------------------------------------------------------------// /*! * Create track initializers on device from secondary particles. 
*/ __global__ void process_secondaries_kernel(const StatePointers states, const ParamPointers params, const TrackInitializerPointers inits) { auto thread_id = KernelParamCalculator::thread_id(); if (thread_id < states.size()) { // Construct the state accessors GeoTrackView geo(params.geo, states.geo, thread_id); SimTrackView sim(states.sim, thread_id); // Offset in the vector of track initializers size_type offset_id = inits.secondary_counts[thread_id.get()]; Interaction& result = states.interactions[thread_id.get()]; for (const auto& secondary : result.secondaries) { if (secondary) { // The secondary survived cutoffs: convert to a track CELER_ASSERT(offset_id < inits.initializers.size()); TrackInitializer& init = inits.initializers[offset_id]; // Store the thread ID of the secondary's parent CELER_ASSERT(offset_id < inits.parent.size()); inits.parent[offset_id++] = thread_id.get(); // Calculate the track ID of the secondary // TODO: This is nondeterministic; we need to calculate the // track ID in a reproducible way. CELER_ASSERT(sim.event_id() < inits.track_counter.size()); TrackId::value_type track_id = atomic_add( &inits.track_counter[sim.event_id().get()], 1u); // Construct a track initializer from a secondary init.sim.track_id = TrackId{track_id}; init.sim.parent_id = sim.track_id(); init.sim.event_id = sim.event_id(); init.sim.alive = true; init.geo.pos = geo.pos(); init.geo.dir = secondary.direction; init.particle.def_id = secondary.def_id; init.particle.energy = secondary.energy; } } // Clear the secondaries from the interaction result.secondaries = {}; } } } // end namespace //---------------------------------------------------------------------------// // KERNEL INTERFACE //---------------------------------------------------------------------------// /*! * Initialize the track states on device. */ void init_tracks(const StatePointers& states, const ParamPointers& params, const TrackInitializerPointers& inits) { // Number of vacancies, limited by the initializer size auto num_vacancies = ::min(inits.vacancies.size(), inits.initializers.size()); // Initialize tracks on device KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(num_vacancies); hipLaunchKernelGGL(( init_tracks_kernel), dim3(lparams.grid_size), dim3(lparams.block_size), 0, 0, states, params, inits, num_vacancies); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Find empty slots in the vector of tracks and count the number of secondaries * that survived cutoffs for each interaction. */ void locate_alive(const StatePointers& states, const ParamPointers& params, const TrackInitializerPointers& inits) { KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(states.size()); hipLaunchKernelGGL(( locate_alive_kernel), dim3(lparams.grid_size), dim3(lparams.block_size), 0, 0, states, params, inits); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Create track initializers from primary particles. 
*/ void process_primaries(Span<const Primary> primaries, const TrackInitializerPointers& inits) { CELER_EXPECT(primaries.size() <= inits.initializers.size()); // Get a view to the last primaries.size() initializers auto initializers = inits.initializers.subspan(inits.initializers.size() - primaries.size()); CELER_ASSERT(initializers.size() == primaries.size()); KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(primaries.size()); hipLaunchKernelGGL(( process_primaries_kernel), dim3(lparams.grid_size), dim3(lparams.block_size), 0, 0, primaries, initializers); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Create track initializers from secondary particles. */ void process_secondaries(const StatePointers& states, const ParamPointers& params, TrackInitializerPointers inits) { CELER_EXPECT(states.size() <= inits.secondary_counts.size()); CELER_EXPECT(states.size() <= states.interactions.size()); // Get a view to the last num_secondaries initializers inits.initializers = inits.initializers.subspan(inits.initializers.size() - inits.parent.size()); KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(states.size()); hipLaunchKernelGGL(( process_secondaries_kernel), dim3(lparams.grid_size), dim3(lparams.block_size), 0, 0, states, params, inits); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Remove all elements in the vacancy vector that were flagged as active * tracks. */ size_type remove_if_alive(Span<size_type> vacancies) { thrust::device_ptr<size_type> end = thrust::remove_if( thrust::device_pointer_cast(vacancies.data()), thrust::device_pointer_cast(vacancies.data() + vacancies.size()), IsEqual{flag_id()}); CELER_CUDA_CHECK_ERROR(); // New size of the vacancy vector size_type result = thrust::raw_pointer_cast(end) - vacancies.data(); return result; } //---------------------------------------------------------------------------// /*! * Sum the total number of surviving secondaries. */ size_type reduce_counts(Span<size_type> counts) { size_type result = thrust::reduce( thrust::device_pointer_cast(counts.data()), thrust::device_pointer_cast(counts.data()) + counts.size(), size_type(0), thrust::plus<size_type>()); CELER_CUDA_CHECK_ERROR(); return result; } //---------------------------------------------------------------------------// /*! * Do an exclusive scan of the number of surviving secondaries from each track. * * For an input array x, this calculates the exclusive prefix sum y of the * array elements, i.e., \f$ y_i = \sum_{j=0}^{i-1} x_j \f$, * where \f$ y_0 = 0 \f$, and stores the result in the input array. */ void exclusive_scan_counts(Span<size_type> counts) { thrust::exclusive_scan( thrust::device_pointer_cast(counts.data()), thrust::device_pointer_cast(counts.data()) + counts.size(), counts.data(), size_type(0)); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// } // namespace detail } // namespace celeritas
e584958f67802bbc4fa6d83f90b1e61ba540aab6.cu
//---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file InitializeTracks.cu //---------------------------------------------------------------------------// #include "InitializeTracks.hh" #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <vector> #include "base/Atomics.hh" #include "base/DeviceVector.hh" #include "base/KernelParamCalculator.cuda.hh" #include "geometry/GeoTrackView.hh" #include "physics/base/ParticleTrackView.hh" #include "sim/SimTrackView.hh" namespace celeritas { namespace detail { namespace { //---------------------------------------------------------------------------// // HELPER CLASSES //---------------------------------------------------------------------------// struct IsEqual { size_type value; CELER_FUNCTION bool operator()(size_type x) const { return x == value; } }; //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// /*! * Initialize the track states on device. The track initializers are created * from either primary particles or secondaries. The new tracks are inserted * into empty slots (vacancies) in the track vector. */ __global__ void init_tracks_kernel(const StatePointers states, const ParamPointers params, const TrackInitializerPointers inits, size_type num_vacancies) { auto thread_id = KernelParamCalculator::thread_id().get(); if (thread_id < num_vacancies) { // Get the track initializer from the back of the vector. Since new // initializers are pushed to the back of the vector, these will be the // most recently added and therefore the ones that still might have a // parent they can copy the geometry state from. const TrackInitializer& init = inits.initializers[inits.initializers.size() - thread_id - 1]; // Index of the empty slot to create the new track in ThreadId slot_id( inits.vacancies[inits.vacancies.size() - thread_id - 1]); // Initialize the simulation state { SimTrackView sim(states.sim, slot_id); sim = init.sim; } // Initialize the particle physics data { ParticleTrackView particle( params.particle, states.particle, slot_id); particle = init.particle; } // Initialize the geometry { GeoTrackView geo(params.geo, states.geo, slot_id); if (thread_id < inits.parent.size()) { // Copy the geometry state from the parent for improved // performance TrackId::value_type parent_id = inits.parent[inits.parent.size() - thread_id - 1]; GeoTrackView parent( params.geo, states.geo, ThreadId{parent_id}); geo = {parent, init.geo.dir}; } else { // Initialize it from the position (more expensive) geo = init.geo; } } } } //---------------------------------------------------------------------------// /*! * Find empty slots in the track vector and count the number of secondaries * that survived cutoffs for each interaction. If the track is dead and * produced secondaries, fill the empty track slot with one of the secondaries. 
*/ __global__ void locate_alive_kernel(const StatePointers states, const ParamPointers params, const TrackInitializerPointers inits) { auto thread_id = KernelParamCalculator::thread_id(); if (thread_id < states.size()) { // Secondary to copy to the parent's track slot if the parent has died size_type secondary_id = flag_id(); // Count how many secondaries survived cutoffs for each track inits.secondary_counts[thread_id.get()] = 0; Interaction& result = states.interactions[thread_id.get()]; for (size_type i = 0; i < result.secondaries.size(); ++i) { if (result.secondaries[i]) { if (secondary_id == flag_id()) { secondary_id = i; } ++inits.secondary_counts[thread_id.get()]; } } SimTrackView sim(states.sim, thread_id); if (sim.alive()) { // The track is alive: mark this track slot as active inits.vacancies[thread_id.get()] = flag_id(); } else if (secondary_id != flag_id()) { // The track is dead and produced secondaries: fill the empty track // slot with the first secondary and mark the track slot as active // Calculate the track ID of the secondary // TODO: This is nondeterministic; we need to calculate the track // ID in a reproducible way. CELER_ASSERT(sim.event_id() < inits.track_counter.size()); TrackId::value_type track_id = atomic_add(&inits.track_counter[sim.event_id().get()], 1u); // Initialize the simulation state sim = {TrackId{track_id}, sim.track_id(), sim.event_id(), true}; // Initialize the particle state from the secondary Secondary& secondary = result.secondaries[secondary_id]; ParticleTrackView particle( params.particle, states.particle, thread_id); particle = {secondary.def_id, secondary.energy}; // Keep the parent's geometry state GeoTrackView geo(params.geo, states.geo, thread_id); geo = {geo, secondary.direction}; // Mark the secondary as processed and the track as active --inits.secondary_counts[thread_id.get()]; secondary = Secondary{}; inits.vacancies[thread_id.get()] = flag_id(); } else { // The track is dead and did not produce secondaries: store the // index so it can be used later to initialize a new track inits.vacancies[thread_id.get()] = thread_id.get(); } } } //---------------------------------------------------------------------------// /*! * Create track initializers on device from primary particles. */ __global__ void process_primaries_kernel(const Span<const Primary> primaries, const Span<TrackInitializer> initializers) { auto thread_id = KernelParamCalculator::thread_id(); if (thread_id < primaries.size()) { TrackInitializer& init = initializers[thread_id.get()]; const Primary& primary = primaries[thread_id.get()]; // Construct a track initializer from a primary particle init.sim.track_id = primary.track_id; init.sim.parent_id = TrackId{}; init.sim.event_id = primary.event_id; init.sim.alive = true; init.geo.pos = primary.position; init.geo.dir = primary.direction; init.particle.def_id = primary.def_id; init.particle.energy = primary.energy; } } //---------------------------------------------------------------------------// /*! * Create track initializers on device from secondary particles. 
*/ __global__ void process_secondaries_kernel(const StatePointers states, const ParamPointers params, const TrackInitializerPointers inits) { auto thread_id = KernelParamCalculator::thread_id(); if (thread_id < states.size()) { // Construct the state accessors GeoTrackView geo(params.geo, states.geo, thread_id); SimTrackView sim(states.sim, thread_id); // Offset in the vector of track initializers size_type offset_id = inits.secondary_counts[thread_id.get()]; Interaction& result = states.interactions[thread_id.get()]; for (const auto& secondary : result.secondaries) { if (secondary) { // The secondary survived cutoffs: convert to a track CELER_ASSERT(offset_id < inits.initializers.size()); TrackInitializer& init = inits.initializers[offset_id]; // Store the thread ID of the secondary's parent CELER_ASSERT(offset_id < inits.parent.size()); inits.parent[offset_id++] = thread_id.get(); // Calculate the track ID of the secondary // TODO: This is nondeterministic; we need to calculate the // track ID in a reproducible way. CELER_ASSERT(sim.event_id() < inits.track_counter.size()); TrackId::value_type track_id = atomic_add( &inits.track_counter[sim.event_id().get()], 1u); // Construct a track initializer from a secondary init.sim.track_id = TrackId{track_id}; init.sim.parent_id = sim.track_id(); init.sim.event_id = sim.event_id(); init.sim.alive = true; init.geo.pos = geo.pos(); init.geo.dir = secondary.direction; init.particle.def_id = secondary.def_id; init.particle.energy = secondary.energy; } } // Clear the secondaries from the interaction result.secondaries = {}; } } } // end namespace //---------------------------------------------------------------------------// // KERNEL INTERFACE //---------------------------------------------------------------------------// /*! * Initialize the track states on device. */ void init_tracks(const StatePointers& states, const ParamPointers& params, const TrackInitializerPointers& inits) { // Number of vacancies, limited by the initializer size auto num_vacancies = std::min(inits.vacancies.size(), inits.initializers.size()); // Initialize tracks on device KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(num_vacancies); init_tracks_kernel<<<lparams.grid_size, lparams.block_size>>>( states, params, inits, num_vacancies); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Find empty slots in the vector of tracks and count the number of secondaries * that survived cutoffs for each interaction. */ void locate_alive(const StatePointers& states, const ParamPointers& params, const TrackInitializerPointers& inits) { KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(states.size()); locate_alive_kernel<<<lparams.grid_size, lparams.block_size>>>( states, params, inits); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Create track initializers from primary particles. 
*/ void process_primaries(Span<const Primary> primaries, const TrackInitializerPointers& inits) { CELER_EXPECT(primaries.size() <= inits.initializers.size()); // Get a view to the last primaries.size() initializers auto initializers = inits.initializers.subspan(inits.initializers.size() - primaries.size()); CELER_ASSERT(initializers.size() == primaries.size()); KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(primaries.size()); process_primaries_kernel<<<lparams.grid_size, lparams.block_size>>>( primaries, initializers); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Create track initializers from secondary particles. */ void process_secondaries(const StatePointers& states, const ParamPointers& params, TrackInitializerPointers inits) { CELER_EXPECT(states.size() <= inits.secondary_counts.size()); CELER_EXPECT(states.size() <= states.interactions.size()); // Get a view to the last num_secondaries initializers inits.initializers = inits.initializers.subspan(inits.initializers.size() - inits.parent.size()); KernelParamCalculator calc_launch_params; auto lparams = calc_launch_params(states.size()); process_secondaries_kernel<<<lparams.grid_size, lparams.block_size>>>( states, params, inits); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// /*! * Remove all elements in the vacancy vector that were flagged as active * tracks. */ size_type remove_if_alive(Span<size_type> vacancies) { thrust::device_ptr<size_type> end = thrust::remove_if( thrust::device_pointer_cast(vacancies.data()), thrust::device_pointer_cast(vacancies.data() + vacancies.size()), IsEqual{flag_id()}); CELER_CUDA_CHECK_ERROR(); // New size of the vacancy vector size_type result = thrust::raw_pointer_cast(end) - vacancies.data(); return result; } //---------------------------------------------------------------------------// /*! * Sum the total number of surviving secondaries. */ size_type reduce_counts(Span<size_type> counts) { size_type result = thrust::reduce( thrust::device_pointer_cast(counts.data()), thrust::device_pointer_cast(counts.data()) + counts.size(), size_type(0), thrust::plus<size_type>()); CELER_CUDA_CHECK_ERROR(); return result; } //---------------------------------------------------------------------------// /*! * Do an exclusive scan of the number of surviving secondaries from each track. * * For an input array x, this calculates the exclusive prefix sum y of the * array elements, i.e., \f$ y_i = \sum_{j=0}^{i-1} x_j \f$, * where \f$ y_0 = 0 \f$, and stores the result in the input array. */ void exclusive_scan_counts(Span<size_type> counts) { thrust::exclusive_scan( thrust::device_pointer_cast(counts.data()), thrust::device_pointer_cast(counts.data()) + counts.size(), counts.data(), size_type(0)); CELER_CUDA_CHECK_ERROR(); } //---------------------------------------------------------------------------// } // namespace detail } // namespace celeritas
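Beyond the launch rewrite, the InitializeTracks pair shows one more substitution worth noting: std::min in the host-side init_tracks function becomes ::min in the HIP translation, while the Thrust calls (thrust::remove_if, thrust::reduce, thrust::exclusive_scan) and the device_pointer_cast plumbing pass through untouched. The kernel launches themselves follow the same pattern as the earlier pairs, for example:

// CUDA form from the .cu file:
//   init_tracks_kernel<<<lparams.grid_size, lparams.block_size>>>(states, params, inits, num_vacancies);
// HIP form from the .hip file (no shared memory, default stream):
hipLaunchKernelGGL(init_tracks_kernel, dim3(lparams.grid_size), dim3(lparams.block_size), 0, 0,
                   states, params, inits, num_vacancies);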
Module.hip
// !!! This is a file automatically generated by hipify!!! # include "Module.cuh" #include "ModuleConnectionMap.h" #include "allocate.h" std::map <unsigned int, unsigned int> *SDL::detIdToIndex; void SDL::createModulesInUnifiedMemory(struct modules& modulesInGPU,unsigned int nModules) { /* modules stucture object will be created in Event.cu*/ #ifdef CACHE_ALLOC hipStream_t stream=0; modulesInGPU.detIds = (unsigned int*)cms::cuda::allocate_managed(nModules * sizeof(unsigned int),stream); modulesInGPU.moduleMap = (unsigned int*)cms::cuda::allocate_managed(nModules * 40 * sizeof(unsigned int),stream); modulesInGPU.nConnectedModules = (unsigned int*)cms::cuda::allocate_managed(nModules * sizeof(unsigned int),stream); modulesInGPU.drdzs = (float*)cms::cuda::allocate_managed(nModules * sizeof(float),stream); modulesInGPU.slopes = (float*)cms::cuda::allocate_managed(nModules * sizeof(float),stream); modulesInGPU.nModules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.nLowerModules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.layers = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.rings = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.modules = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.rods = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.subdets = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.sides = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.isInverted = (bool*)cms::cuda::allocate_managed(nModules * sizeof(bool),stream); modulesInGPU.isLower = (bool*)cms::cuda::allocate_managed(nModules * sizeof(bool),stream); modulesInGPU.nEligibleModules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.nEligibleT5Modules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.hitRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.mdRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.segmentRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.trackletRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.tripletRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.trackCandidateRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.quintupletRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.moduleType = (ModuleType*)cms::cuda::allocate_managed(nModules * sizeof(ModuleType),stream); modulesInGPU.moduleLayerType=(ModuleLayerType*)cms::cuda::allocate_managed(nModules * sizeof(ModuleLayerType),stream); #else hipMallocManaged(&modulesInGPU.detIds,nModules * sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.moduleMap,nModules * 40 * sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.nConnectedModules,nModules * sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.drdzs,nModules * sizeof(float)); hipMallocManaged(&modulesInGPU.slopes,nModules * sizeof(float)); hipMallocManaged(&modulesInGPU.nModules,sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.nLowerModules,sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.layers,nModules * sizeof(short)); 
hipMallocManaged(&modulesInGPU.rings,nModules * sizeof(short)); hipMallocManaged(&modulesInGPU.modules,nModules * sizeof(short)); hipMallocManaged(&modulesInGPU.rods,nModules * sizeof(short)); hipMallocManaged(&modulesInGPU.subdets,nModules * sizeof(short)); hipMallocManaged(&modulesInGPU.sides,nModules * sizeof(short)); hipMallocManaged(&modulesInGPU.isInverted, nModules * sizeof(bool)); hipMallocManaged(&modulesInGPU.isLower, nModules * sizeof(bool)); hipMallocManaged(&modulesInGPU.nEligibleModules,sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.nEligibleT5Modules, sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.hitRanges,nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.mdRanges,nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.segmentRanges,nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.trackletRanges,nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.tripletRanges,nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.trackCandidateRanges, nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.quintupletRanges, nModules * 2 * sizeof(int)); hipMallocManaged(&modulesInGPU.moduleType,nModules * sizeof(ModuleType)); hipMallocManaged(&modulesInGPU.moduleLayerType,nModules * sizeof(ModuleLayerType)); #endif *modulesInGPU.nModules = nModules; } void SDL::createModulesInExplicitMemory(struct modules& modulesInGPU,unsigned int nModules) { /* modules stucture object will be created in Event.cu*/ #ifdef CACHE_ALLOC hipStream_t stream=0; int dev; hipGetDevice(&dev); modulesInGPU.detIds = (unsigned int*)cms::cuda::allocate_device(dev,nModules * sizeof(unsigned int),stream); modulesInGPU.moduleMap = (unsigned int*)cms::cuda::allocate_device(dev,nModules * 40 * sizeof(unsigned int),stream); modulesInGPU.nConnectedModules = (unsigned int*)cms::cuda::allocate_device(dev,nModules * sizeof(unsigned int),stream); modulesInGPU.drdzs = (float*)cms::cuda::allocate_device(dev,nModules * sizeof(float),stream); modulesInGPU.slopes = (float*)cms::cuda::allocate_device(dev,nModules * sizeof(float),stream); modulesInGPU.nModules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.nLowerModules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.layers = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.rings = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.modules = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.rods = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.subdets = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.sides = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.isInverted = (bool*)cms::cuda::allocate_device(dev,nModules * sizeof(bool),stream); modulesInGPU.isLower = (bool*)cms::cuda::allocate_device(dev,nModules * sizeof(bool),stream); modulesInGPU.nEligibleModules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.nEligibleT5Modules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.hitRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.mdRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.segmentRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * 
sizeof(int),stream); modulesInGPU.trackletRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.tripletRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.trackCandidateRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.quintupletRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.moduleType = (ModuleType*)cms::cuda::allocate_device(dev,nModules * sizeof(ModuleType),stream); modulesInGPU.moduleLayerType= (ModuleLayerType*)cms::cuda::allocate_device(dev,nModules * sizeof(ModuleLayerType),stream); #else hipMalloc(&(modulesInGPU.detIds),nModules * sizeof(unsigned int)); hipMalloc(&modulesInGPU.moduleMap,nModules * 40 * sizeof(unsigned int)); hipMalloc(&modulesInGPU.nConnectedModules,nModules * sizeof(unsigned int)); hipMalloc(&modulesInGPU.drdzs,nModules * sizeof(float)); hipMalloc(&modulesInGPU.slopes,nModules * sizeof(float)); hipMalloc(&modulesInGPU.nModules,sizeof(unsigned int)); hipMalloc(&modulesInGPU.nLowerModules,sizeof(unsigned int)); hipMalloc(&modulesInGPU.layers,nModules * sizeof(short)); hipMalloc(&modulesInGPU.rings,nModules * sizeof(short)); hipMalloc(&modulesInGPU.modules,nModules * sizeof(short)); hipMalloc(&modulesInGPU.rods,nModules * sizeof(short)); hipMalloc(&modulesInGPU.subdets,nModules * sizeof(short)); hipMalloc(&modulesInGPU.sides,nModules * sizeof(short)); hipMalloc(&modulesInGPU.isInverted, nModules * sizeof(bool)); hipMalloc(&modulesInGPU.isLower, nModules * sizeof(bool)); hipMalloc(&modulesInGPU.nEligibleModules,sizeof(unsigned int)); hipMalloc(&modulesInGPU.nEligibleT5Modules, sizeof(unsigned int)); hipMalloc(&modulesInGPU.hitRanges,nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.mdRanges,nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.segmentRanges,nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.trackletRanges,nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.tripletRanges,nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.trackCandidateRanges, nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.quintupletRanges, nModules * 2 * sizeof(int)); hipMalloc(&modulesInGPU.moduleType,nModules * sizeof(ModuleType)); hipMalloc(&modulesInGPU.moduleLayerType,nModules * sizeof(ModuleLayerType)); #endif hipMemcpy(modulesInGPU.nModules,&nModules,sizeof(unsigned int),hipMemcpyHostToDevice); } void SDL::freeModulesCache(struct modules& modulesInGPU,struct pixelMap& pixelMapping) { #ifdef Explicit_Module int dev; hipGetDevice(&dev); cms::cuda::free_device(dev,modulesInGPU.detIds); cms::cuda::free_device(dev,modulesInGPU.moduleMap); cms::cuda::free_device(dev,modulesInGPU.nConnectedModules); cms::cuda::free_device(dev,modulesInGPU.drdzs); cms::cuda::free_device(dev,modulesInGPU.slopes); cms::cuda::free_device(dev,modulesInGPU.nModules); cms::cuda::free_device(dev,modulesInGPU.nLowerModules); cms::cuda::free_device(dev,modulesInGPU.layers); cms::cuda::free_device(dev,modulesInGPU.rings); cms::cuda::free_device(dev,modulesInGPU.modules); cms::cuda::free_device(dev,modulesInGPU.rods); cms::cuda::free_device(dev,modulesInGPU.subdets); cms::cuda::free_device(dev,modulesInGPU.sides); cms::cuda::free_device(dev,modulesInGPU.isInverted); cms::cuda::free_device(dev,modulesInGPU.isLower); cms::cuda::free_device(dev,modulesInGPU.hitRanges); cms::cuda::free_device(dev,modulesInGPU.mdRanges); cms::cuda::free_device(dev,modulesInGPU.segmentRanges); cms::cuda::free_device(dev,modulesInGPU.trackletRanges); 
cms::cuda::free_device(dev,modulesInGPU.tripletRanges); cms::cuda::free_device(dev,modulesInGPU.trackCandidateRanges); cms::cuda::free_device(dev,modulesInGPU.quintupletRanges); cms::cuda::free_device(dev,modulesInGPU.moduleType); cms::cuda::free_device(dev,modulesInGPU.moduleLayerType); cms::cuda::free_device(dev,modulesInGPU.lowerModuleIndices); cms::cuda::free_device(dev,modulesInGPU.reverseLookupLowerModuleIndices); cms::cuda::free_device(dev,modulesInGPU.trackCandidateModuleIndices); cms::cuda::free_device(dev,modulesInGPU.quintupletModuleIndices); cms::cuda::free_device(dev,modulesInGPU.nEligibleModules); cms::cuda::free_device(dev,modulesInGPU.nEligibleT5Modules); cms::cuda::free_device(dev,modulesInGPU.connectedPixels); #else cms::cuda::free_managed(modulesInGPU.detIds); cms::cuda::free_managed(modulesInGPU.moduleMap); cms::cuda::free_managed(modulesInGPU.nConnectedModules); cms::cuda::free_managed(modulesInGPU.drdzs); cms::cuda::free_managed(modulesInGPU.slopes); cms::cuda::free_managed(modulesInGPU.nModules); cms::cuda::free_managed(modulesInGPU.nLowerModules); cms::cuda::free_managed(modulesInGPU.layers); cms::cuda::free_managed(modulesInGPU.rings); cms::cuda::free_managed(modulesInGPU.modules); cms::cuda::free_managed(modulesInGPU.rods); cms::cuda::free_managed(modulesInGPU.subdets); cms::cuda::free_managed(modulesInGPU.sides); cms::cuda::free_managed(modulesInGPU.isInverted); cms::cuda::free_managed(modulesInGPU.isLower); cms::cuda::free_managed(modulesInGPU.hitRanges); cms::cuda::free_managed(modulesInGPU.mdRanges); cms::cuda::free_managed(modulesInGPU.segmentRanges); cms::cuda::free_managed(modulesInGPU.trackletRanges); cms::cuda::free_managed(modulesInGPU.tripletRanges); cms::cuda::free_managed(modulesInGPU.trackCandidateRanges); cms::cuda::free_managed(modulesInGPU.quintupletRanges); cms::cuda::free_managed(modulesInGPU.moduleType); cms::cuda::free_managed(modulesInGPU.moduleLayerType); cms::cuda::free_managed(modulesInGPU.lowerModuleIndices); cms::cuda::free_managed(modulesInGPU.reverseLookupLowerModuleIndices); cms::cuda::free_managed(modulesInGPU.trackCandidateModuleIndices); cms::cuda::free_managed(modulesInGPU.quintupletModuleIndices); cms::cuda::free_managed(modulesInGPU.nEligibleModules); cms::cuda::free_managed(modulesInGPU.nEligibleT5Modules); cms::cuda::free_managed(modulesInGPU.connectedPixels); #endif hipHostFree(pixelMapping.connectedPixelsSizes); hipHostFree(pixelMapping.connectedPixelsSizesPos); hipHostFree(pixelMapping.connectedPixelsSizesNeg); hipHostFree(pixelMapping.connectedPixelsIndex); hipHostFree(pixelMapping.connectedPixelsIndexPos); hipHostFree(pixelMapping.connectedPixelsIndexNeg); } void SDL::freeModules(struct modules& modulesInGPU, struct pixelMap& pixelMapping) { hipFree(modulesInGPU.detIds); hipFree(modulesInGPU.moduleMap); hipFree(modulesInGPU.nConnectedModules); hipFree(modulesInGPU.drdzs); hipFree(modulesInGPU.slopes); hipFree(modulesInGPU.nModules); hipFree(modulesInGPU.nLowerModules); hipFree(modulesInGPU.layers); hipFree(modulesInGPU.rings); hipFree(modulesInGPU.modules); hipFree(modulesInGPU.rods); hipFree(modulesInGPU.subdets); hipFree(modulesInGPU.sides); hipFree(modulesInGPU.isInverted); hipFree(modulesInGPU.isLower); hipFree(modulesInGPU.hitRanges); hipFree(modulesInGPU.mdRanges); hipFree(modulesInGPU.segmentRanges); hipFree(modulesInGPU.trackletRanges); hipFree(modulesInGPU.tripletRanges); hipFree(modulesInGPU.trackCandidateRanges); hipFree(modulesInGPU.quintupletRanges); hipFree(modulesInGPU.moduleType); 
hipFree(modulesInGPU.moduleLayerType); hipFree(modulesInGPU.lowerModuleIndices); hipFree(modulesInGPU.reverseLookupLowerModuleIndices); hipFree(modulesInGPU.trackCandidateModuleIndices); hipFree(modulesInGPU.quintupletModuleIndices); hipFree(modulesInGPU.nEligibleModules); hipFree(modulesInGPU.nEligibleT5Modules); hipFree(modulesInGPU.connectedPixels); hipHostFree(pixelMapping.connectedPixelsSizes); hipHostFree(pixelMapping.connectedPixelsSizesPos); hipHostFree(pixelMapping.connectedPixelsSizesNeg); hipHostFree(pixelMapping.connectedPixelsIndex); hipHostFree(pixelMapping.connectedPixelsIndexPos); hipHostFree(pixelMapping.connectedPixelsIndexNeg); } void SDL::createLowerModuleIndexMapExplicit(struct modules& modulesInGPU, unsigned int nLowerModules, unsigned int nModules,bool* isLower) { //FIXME:some hacks to get the pixel module in the lower modules index without incrementing nLowerModules counter! //Reproduce these hacks in the explicit memory for identical results (or come up with a better method) unsigned int* lowerModuleIndices; int* reverseLookupLowerModuleIndices; hipHostMalloc(&lowerModuleIndices,(nLowerModules + 1) * sizeof(unsigned int)); hipHostMalloc(&reverseLookupLowerModuleIndices,nModules * sizeof(int)); unsigned int lowerModuleCounter = 0; for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int index = it->second; unsigned int detId = it->first; if(isLower[index]) { lowerModuleIndices[lowerModuleCounter] = index; reverseLookupLowerModuleIndices[index] = lowerModuleCounter; lowerModuleCounter++; } else { reverseLookupLowerModuleIndices[index] = -1; } } //hacky stuff "beyond the index" for the pixel module. nLowerModules will *NOT* cover the pixel module! lowerModuleIndices[nLowerModules] = (*detIdToIndex)[1]; reverseLookupLowerModuleIndices[(*detIdToIndex)[1]] = nLowerModules; #ifdef CACHE_ALLOC hipStream_t stream =0; int dev; hipGetDevice(&dev); modulesInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_device(dev,(nLowerModules + 1) * sizeof(unsigned int),stream); modulesInGPU.reverseLookupLowerModuleIndices = (int*)cms::cuda::allocate_device(dev,nModules * sizeof(int),stream); modulesInGPU.trackCandidateModuleIndices = (int*)cms::cuda::allocate_device(dev,(nLowerModules + 1) * sizeof(int),stream); modulesInGPU.quintupletModuleIndices = (int*)cms::cuda::allocate_device(dev,nLowerModules * sizeof(int),stream); #else hipMalloc(&modulesInGPU.lowerModuleIndices,(nLowerModules + 1) * sizeof(unsigned int)); hipMalloc(&modulesInGPU.reverseLookupLowerModuleIndices,nModules * sizeof(int)); hipMalloc(&modulesInGPU.trackCandidateModuleIndices, (nLowerModules + 1) * sizeof(int)); hipMalloc(&modulesInGPU.quintupletModuleIndices, nLowerModules * sizeof(int)); #endif hipMemcpy(modulesInGPU.lowerModuleIndices,lowerModuleIndices,sizeof(unsigned int)*(nLowerModules+1),hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.reverseLookupLowerModuleIndices,reverseLookupLowerModuleIndices,sizeof(int)*nModules,hipMemcpyHostToDevice); hipHostFree(lowerModuleIndices); hipHostFree(reverseLookupLowerModuleIndices); } void SDL::createLowerModuleIndexMap(struct modules& modulesInGPU, unsigned int nLowerModules, unsigned int nModules) { //FIXME:some hacks to get the pixel module in the lower modules index without incrementing nLowerModules counter! 
//Reproduce these hacks in the explicit memory for identical results (or come up with a better method) #ifdef CACHE_ALLOC hipStream_t stream =0; modulesInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_managed((nLowerModules + 1) * sizeof(unsigned int),stream); modulesInGPU.reverseLookupLowerModuleIndices = (int*)cms::cuda::allocate_managed(nModules * sizeof(int),stream); modulesInGPU.trackCandidateModuleIndices = (int*)cms::cuda::allocate_managed((nLowerModules + 1) * sizeof(int),stream); modulesInGPU.quintupletModuleIndices = (int*)cms::cuda::allocate_managed(nLowerModules * sizeof(int),stream); #else hipMallocManaged(&modulesInGPU.lowerModuleIndices,(nLowerModules + 1) * sizeof(unsigned int)); hipMallocManaged(&modulesInGPU.reverseLookupLowerModuleIndices,nModules * sizeof(int)); hipMallocManaged(&modulesInGPU.trackCandidateModuleIndices, (nLowerModules + 1) * sizeof(int)); hipMallocManaged(&modulesInGPU.quintupletModuleIndices, nLowerModules * sizeof(int)); #endif unsigned int lowerModuleCounter = 0; for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int index = it->second; unsigned int detId = it->first; if(modulesInGPU.isLower[index]) { modulesInGPU.lowerModuleIndices[lowerModuleCounter] = index; modulesInGPU.reverseLookupLowerModuleIndices[index] = lowerModuleCounter; lowerModuleCounter++; } else { modulesInGPU.reverseLookupLowerModuleIndices[index] = -1; } } //hacky stuff "beyond the index" for the pixel module. nLowerModules will *NOT* cover the pixel module! modulesInGPU.lowerModuleIndices[nLowerModules] = (*detIdToIndex)[1]; modulesInGPU.reverseLookupLowerModuleIndices[(*detIdToIndex)[1]] = nLowerModules; } void SDL::loadModulesFromFile(struct modules& modulesInGPU, unsigned int& nModules, struct pixelMap& pixelMapping, const char* moduleMetaDataFilePath) { detIdToIndex = new std::map<unsigned int, unsigned int>; /*modules structure object will be created in Event.cu*/ /* Load the whole text file into the unordered_map first*/ std::ifstream ifile; ifile.open(moduleMetaDataFilePath); if(!ifile.is_open()) { std::cout<<"ERROR! 
module list file not present!"<<std::endl; } std::string line; unsigned int counter = 0; while(std::getline(ifile,line)) { std::stringstream ss(line); std::string token; bool flag = 0; while(std::getline(ss,token,',')) { if(flag == 1) break; (*detIdToIndex)[stoi(token)] = counter; flag = 1; counter++; } } (*detIdToIndex)[1] = counter; //pixel module is the last module in the module list counter++; nModules = counter; std::cout<<"Number of modules = "<<nModules<<std::endl; #ifdef Explicit_Module createModulesInExplicitMemory(modulesInGPU,nModules); unsigned int* lowerModuleCounter;// = 0; hipHostMalloc(&lowerModuleCounter,sizeof(unsigned int)); hipMemset(lowerModuleCounter,0,sizeof(unsigned int)); unsigned int* host_detIds; short* host_layers; short* host_rings; short* host_rods; short* host_modules; short* host_subdets; short* host_sides; bool* host_isInverted; bool* host_isLower; ModuleType* host_moduleType; ModuleLayerType* host_moduleLayerType; float* host_slopes; float* host_drdzs; hipHostMalloc(&host_detIds,sizeof(unsigned int)*nModules); hipHostMalloc(&host_layers,sizeof(short)*nModules); hipHostMalloc(&host_rings,sizeof(short)*nModules); hipHostMalloc(&host_rods,sizeof(short)*nModules); hipHostMalloc(&host_modules,sizeof(short)*nModules); hipHostMalloc(&host_subdets,sizeof(short)*nModules); hipHostMalloc(&host_sides,sizeof(short)*nModules); hipHostMalloc(&host_isInverted,sizeof(bool)*nModules); hipHostMalloc(&host_isLower,sizeof(bool)*nModules); hipHostMalloc(&host_moduleType,sizeof(ModuleType)*nModules); hipHostMalloc(&host_moduleLayerType,sizeof(ModuleLayerType)*nModules); hipHostMalloc(&host_slopes,sizeof(float)*nModules); hipHostMalloc(&host_drdzs,sizeof(float)*nModules); for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int detId = it->first; unsigned int index = it->second; host_detIds[index] = detId; if(detId == 1) { host_layers[index] = 0; host_rings[index] = 0; host_rods[index] = 0; host_modules[index] = 0; host_subdets[index] = SDL::InnerPixel; host_sides[index] = 0; host_isInverted[index] = 0; host_isLower[index] = false; host_moduleType[index] = PixelModule; host_moduleLayerType[index] = SDL::InnerPixelLayer; host_slopes[index] = 0; host_drdzs[index] = 0; } else { unsigned short layer,ring,rod,module,subdet,side; setDerivedQuantities(detId,layer,ring,rod,module,subdet,side); host_layers[index] = layer; host_rings[index] = ring; host_rods[index] = rod; host_modules[index] = module; host_subdets[index] = subdet; host_sides[index] = side; host_isInverted[index] = modulesInGPU.parseIsInverted(index,subdet, side,module,layer); host_isLower[index] = modulesInGPU.parseIsLower(index, host_isInverted[index], detId); host_moduleType[index] = modulesInGPU.parseModuleType(index, subdet, layer, ring); host_moduleLayerType[index] = modulesInGPU.parseModuleLayerType(index, host_moduleType[index],host_isInverted[index],host_isLower[index]); host_slopes[index] = (subdet == Endcap) ? endcapGeometry.getSlopeLower(detId) : tiltedGeometry.getSlope(detId); host_drdzs[index] = (subdet == Barrel) ? 
tiltedGeometry.getDrDz(detId) : 0; } lowerModuleCounter[0] += host_isLower[index]; } hipMemcpy(modulesInGPU.nLowerModules,lowerModuleCounter,sizeof(unsigned int),hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.detIds,host_detIds,nModules*sizeof(unsigned int),hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.layers,host_layers,nModules*sizeof(short),hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.rings,host_rings,sizeof(short)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.rods,host_rods,sizeof(short)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.modules,host_modules,sizeof(short)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.subdets,host_subdets,sizeof(short)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.sides,host_sides,sizeof(short)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.isInverted,host_isInverted,sizeof(bool)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.isLower,host_isLower,sizeof(bool)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.moduleType,host_moduleType,sizeof(ModuleType)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.moduleLayerType,host_moduleLayerType,sizeof(ModuleLayerType)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.slopes,host_slopes,sizeof(float)*nModules,hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.drdzs,host_drdzs,sizeof(float)*nModules,hipMemcpyHostToDevice); hipHostFree(host_detIds); hipHostFree(host_layers); hipHostFree(host_rings); hipHostFree(host_rods); hipHostFree(host_modules); hipHostFree(host_subdets); hipHostFree(host_sides); hipHostFree(host_isInverted); hipHostFree(host_isLower); hipHostFree(host_moduleType); hipHostFree(host_moduleLayerType); hipHostFree(host_slopes); hipHostFree(host_drdzs); hipHostFree(lowerModuleCounter); std::cout<<"number of lower modules (without fake pixel module)= "<<lowerModuleCounter[0]<<std::endl; createLowerModuleIndexMapExplicit(modulesInGPU,lowerModuleCounter[0], nModules,host_isLower); fillConnectedModuleArrayExplicit(modulesInGPU,nModules); fillPixelMap(modulesInGPU,pixelMapping); resetObjectRanges(modulesInGPU,nModules); #else createModulesInUnifiedMemory(modulesInGPU,nModules); unsigned int lowerModuleCounter = 0; for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int detId = it->first; unsigned int index = it->second; modulesInGPU.detIds[index] = detId; if(detId == 1) { modulesInGPU.layers[index] = 0; modulesInGPU.rings[index] = 0; modulesInGPU.rods[index] = 0; modulesInGPU.modules[index] = 0; modulesInGPU.subdets[index] = SDL::InnerPixel; modulesInGPU.sides[index] = 0; modulesInGPU.isInverted[index] = 0; modulesInGPU.isLower[index] = false; modulesInGPU.moduleType[index] = PixelModule; modulesInGPU.moduleLayerType[index] = SDL::InnerPixelLayer; modulesInGPU.slopes[index] = 0; modulesInGPU.drdzs[index] = 0; } else { unsigned short layer,ring,rod,module,subdet,side; setDerivedQuantities(detId,layer,ring,rod,module,subdet,side); modulesInGPU.layers[index] = layer; modulesInGPU.rings[index] = ring; modulesInGPU.rods[index] = rod; modulesInGPU.modules[index] = module; modulesInGPU.subdets[index] = subdet; modulesInGPU.sides[index] = side; modulesInGPU.isInverted[index] = modulesInGPU.parseIsInverted(index); modulesInGPU.isLower[index] = modulesInGPU.parseIsLower(index); modulesInGPU.moduleType[index] = modulesInGPU.parseModuleType(index); modulesInGPU.moduleLayerType[index] = modulesInGPU.parseModuleLayerType(index); modulesInGPU.slopes[index] = (subdet == Endcap) ? 
endcapGeometry.getSlopeLower(detId) : tiltedGeometry.getSlope(detId); modulesInGPU.drdzs[index] = (subdet == Barrel) ? tiltedGeometry.getDrDz(detId) : 0; } if(modulesInGPU.isLower[index]) lowerModuleCounter++; } *modulesInGPU.nLowerModules = lowerModuleCounter; std::cout<<"number of lower modules (without fake pixel module)= "<<*modulesInGPU.nLowerModules<<std::endl; createLowerModuleIndexMap(modulesInGPU,lowerModuleCounter, nModules); fillConnectedModuleArray(modulesInGPU,nModules); fillPixelMap(modulesInGPU,pixelMapping); resetObjectRanges(modulesInGPU,nModules); #endif } void SDL::fillPixelMap(struct modules& modulesInGPU, struct pixelMap& pixelMapping) { int size_superbins = 45000;//SDL::moduleConnectionMap_pLStoLayer1Subdet5.size(); //changed to 45000 to reduce memory useage on GPU std::vector<unsigned int> connectedModuleDetIds; std::vector<unsigned int> connectedModuleDetIds_pos; std::vector<unsigned int> connectedModuleDetIds_neg; unsigned int* connectedPixelsIndex; unsigned int* connectedPixelsIndexPos; unsigned int* connectedPixelsIndexNeg; unsigned int* connectedPixelsSizes; unsigned int* connectedPixelsSizesPos; unsigned int* connectedPixelsSizesNeg; hipHostMalloc(&pixelMapping.connectedPixelsIndex,size_superbins * sizeof(unsigned int)); hipHostMalloc(&pixelMapping.connectedPixelsSizes,size_superbins * sizeof(unsigned int)); hipHostMalloc(&pixelMapping.connectedPixelsIndexPos,size_superbins * sizeof(unsigned int)); hipHostMalloc(&pixelMapping.connectedPixelsSizesPos,size_superbins * sizeof(unsigned int)); hipHostMalloc(&pixelMapping.connectedPixelsIndexNeg,size_superbins * sizeof(unsigned int)); hipHostMalloc(&pixelMapping.connectedPixelsSizesNeg,size_superbins * sizeof(unsigned int)); int totalSizes=0; int totalSizes_pos=0; int totalSizes_neg=0; for(int isuperbin =0; isuperbin<size_superbins; isuperbin++) { std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet5 = SDL::moduleConnectionMap_pLStoLayer1Subdet5.getConnectedModuleDetIds(isuperbin+size_superbins);// index adjustment to get high values std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet5 = SDL::moduleConnectionMap_pLStoLayer2Subdet5.getConnectedModuleDetIds(isuperbin+size_superbins);// from the high pt bins std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet5 = SDL::moduleConnectionMap_pLStoLayer3Subdet5.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet4 = SDL::moduleConnectionMap_pLStoLayer1Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet4 = SDL::moduleConnectionMap_pLStoLayer2Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet4 = SDL::moduleConnectionMap_pLStoLayer3Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer4Subdet4 = SDL::moduleConnectionMap_pLStoLayer4Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer1Subdet5.begin(),connectedModuleDetIds_pLStoLayer1Subdet5.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer2Subdet5.begin(),connectedModuleDetIds_pLStoLayer2Subdet5.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer3Subdet5.begin(),connectedModuleDetIds_pLStoLayer3Subdet5.end()); 
connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer1Subdet4.begin(),connectedModuleDetIds_pLStoLayer1Subdet4.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer2Subdet4.begin(),connectedModuleDetIds_pLStoLayer2Subdet4.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer3Subdet4.begin(),connectedModuleDetIds_pLStoLayer3Subdet4.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer4Subdet4.begin(),connectedModuleDetIds_pLStoLayer4Subdet4.end()); int sizes =0; sizes += connectedModuleDetIds_pLStoLayer1Subdet5.size(); sizes += connectedModuleDetIds_pLStoLayer2Subdet5.size(); sizes += connectedModuleDetIds_pLStoLayer3Subdet5.size(); sizes += connectedModuleDetIds_pLStoLayer1Subdet4.size(); sizes += connectedModuleDetIds_pLStoLayer2Subdet4.size(); sizes += connectedModuleDetIds_pLStoLayer3Subdet4.size(); sizes += connectedModuleDetIds_pLStoLayer4Subdet4.size(); pixelMapping.connectedPixelsIndex[isuperbin] = totalSizes; pixelMapping.connectedPixelsSizes[isuperbin] = sizes; totalSizes += sizes; std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet5_pos = SDL::moduleConnectionMap_pLStoLayer1Subdet5_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet5_pos = SDL::moduleConnectionMap_pLStoLayer2Subdet5_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet5_pos = SDL::moduleConnectionMap_pLStoLayer3Subdet5_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer1Subdet4_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer2Subdet4_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer3Subdet4_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer4Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer4Subdet4_pos.getConnectedModuleDetIds(isuperbin); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer1Subdet5_pos.begin(),connectedModuleDetIds_pLStoLayer1Subdet5_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer2Subdet5_pos.begin(),connectedModuleDetIds_pLStoLayer2Subdet5_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer3Subdet5_pos.begin(),connectedModuleDetIds_pLStoLayer3Subdet5_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer1Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer1Subdet4_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer2Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer2Subdet4_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer3Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer3Subdet4_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer4Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer4Subdet4_pos.end()); int sizes_pos =0; sizes_pos += 
connectedModuleDetIds_pLStoLayer1Subdet5_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer2Subdet5_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer3Subdet5_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer1Subdet4_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer2Subdet4_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer3Subdet4_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer4Subdet4_pos.size(); pixelMapping.connectedPixelsIndexPos[isuperbin] = totalSizes_pos; pixelMapping.connectedPixelsSizesPos[isuperbin] = sizes_pos; totalSizes_pos += sizes_pos; std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet5_neg = SDL::moduleConnectionMap_pLStoLayer1Subdet5_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet5_neg = SDL::moduleConnectionMap_pLStoLayer2Subdet5_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet5_neg = SDL::moduleConnectionMap_pLStoLayer3Subdet5_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer1Subdet4_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer2Subdet4_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer3Subdet4_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer4Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer4Subdet4_neg.getConnectedModuleDetIds(isuperbin); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer1Subdet5_neg.begin(),connectedModuleDetIds_pLStoLayer1Subdet5_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer2Subdet5_neg.begin(),connectedModuleDetIds_pLStoLayer2Subdet5_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer3Subdet5_neg.begin(),connectedModuleDetIds_pLStoLayer3Subdet5_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer1Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer1Subdet4_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer2Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer2Subdet4_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer3Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer3Subdet4_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer4Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer4Subdet4_neg.end()); int sizes_neg =0; sizes_neg += connectedModuleDetIds_pLStoLayer1Subdet5_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer2Subdet5_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer3Subdet5_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer1Subdet4_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer2Subdet4_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer3Subdet4_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer4Subdet4_neg.size(); pixelMapping.connectedPixelsIndexNeg[isuperbin] = totalSizes_neg; pixelMapping.connectedPixelsSizesNeg[isuperbin] = sizes_neg; totalSizes_neg += sizes_neg; } unsigned 
int* connectedPixels; hipHostMalloc(&connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg) * sizeof(unsigned int)); #ifdef CACHE_ALLOC hipStream_t stream=0; #ifdef Explicit_Module int dev; hipGetDevice(&dev); modulesInGPU.connectedPixels = (unsigned int*)cms::cuda::allocate_device(dev,(totalSizes+totalSizes_pos+totalSizes_neg) * sizeof(unsigned int),stream); #else modulesInGPU.connectedPixels = (unsigned int*)cms::cuda::allocate_managed((totalSizes+totalSizes_pos+totalSizes_neg) * sizeof(unsigned int),stream); #endif #else #ifdef Explicit_Module hipMalloc(&modulesInGPU.connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg)* sizeof(unsigned int)); #else hipMallocManaged(&modulesInGPU.connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg)* sizeof(unsigned int)); #endif #endif for(int icondet=0; icondet< totalSizes; icondet++){ connectedPixels[icondet] = (*detIdToIndex)[connectedModuleDetIds[icondet]]; } for(int icondet=0; icondet< totalSizes_pos; icondet++){ connectedPixels[icondet+totalSizes] = (*detIdToIndex)[connectedModuleDetIds_pos[icondet]]; } for(int icondet=0; icondet< totalSizes_neg; icondet++){ connectedPixels[icondet+totalSizes+totalSizes_pos] = (*detIdToIndex)[connectedModuleDetIds_neg[icondet]]; } hipMemcpy(modulesInGPU.connectedPixels,connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg)*sizeof(unsigned int),hipMemcpyHostToDevice); hipHostFree(connectedPixels); } void SDL::fillConnectedModuleArrayExplicit(struct modules& modulesInGPU, unsigned int nModules) { unsigned int* moduleMap; unsigned int* nConnectedModules; hipHostMalloc(&moduleMap,nModules * 40 * sizeof(unsigned int)); hipHostMalloc(&nConnectedModules,nModules * sizeof(unsigned int)); for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); ++it) { unsigned int detId = it->first; unsigned int index = it->second; auto& connectedModules = moduleConnectionMap.getConnectedModuleDetIds(detId); nConnectedModules[index] = connectedModules.size(); for(unsigned int i = 0; i< nConnectedModules[index];i++) { moduleMap[index * 40 + i] = (*detIdToIndex)[connectedModules[i]]; } } hipMemcpy(modulesInGPU.moduleMap,moduleMap,nModules*40*sizeof(unsigned int),hipMemcpyHostToDevice); hipMemcpy(modulesInGPU.nConnectedModules,nConnectedModules,nModules*sizeof(unsigned int),hipMemcpyHostToDevice); hipHostFree(moduleMap); hipHostFree(nConnectedModules); } void SDL::fillConnectedModuleArray(struct modules& modulesInGPU, unsigned int nModules) { for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); ++it) { unsigned int detId = it->first; unsigned int index = it->second; auto& connectedModules = moduleConnectionMap.getConnectedModuleDetIds(detId); modulesInGPU.nConnectedModules[index] = connectedModules.size(); for(unsigned int i = 0; i< modulesInGPU.nConnectedModules[index];i++) { modulesInGPU.moduleMap[index * 40 + i] = (*detIdToIndex)[connectedModules[i]]; } } } void SDL::setDerivedQuantities(unsigned int detId, unsigned short& layer, unsigned short& ring, unsigned short& rod, unsigned short& module, unsigned short& subdet, unsigned short& side) { subdet = (detId & (7 << 25)) >> 25; side = (subdet == Endcap) ? (detId & (3 << 23)) >> 23 : (detId & (3 << 18)) >> 18; layer = (subdet == Endcap) ? (detId & (7 << 18)) >> 18 : (detId & (7 << 20)) >> 20; ring = (subdet == Endcap) ? (detId & (15 << 12)) >> 12 : 0; module = (detId & (127 << 2)) >> 2; rod = (subdet == Endcap) ? 
0 : (detId & (127 << 10)) >> 10;
}

//auxiliary functions - will be called as needed
bool SDL::modules::parseIsInverted(unsigned int index)
{
  if (subdets[index] == Endcap)
  {
    if (sides[index] == NegZ) { return modules[index] % 2 == 1; }
    else if (sides[index] == PosZ) { return modules[index] % 2 == 0; }
    else { return 0; }
  }
  else if (subdets[index] == Barrel)
  {
    if (sides[index] == Center)
    {
      if (layers[index] <= 3) { return modules[index] % 2 == 1; }
      else if (layers[index] >= 4) { return modules[index] % 2 == 0; }
      else { return 0; }
    }
    else if (sides[index] == NegZ or sides[index] == PosZ)
    {
      if (layers[index] <= 2) { return modules[index] % 2 == 1; }
      else if (layers[index] == 3) { return modules[index] % 2 == 0; }
      else { return 0; }
    }
    else { return 0; }
  }
  else { return 0; }
}

bool SDL::modules::parseIsInverted(unsigned int index, short subdet, short side, short module, short layer)
{
  if (subdet == Endcap)
  {
    if (side == NegZ) { return module % 2 == 1; }
    else if (side == PosZ) { return module % 2 == 0; }
    else { return 0; }
  }
  else if (subdet == Barrel)
  {
    if (side == Center)
    {
      if (layer <= 3) { return module % 2 == 1; }
      else if (layer >= 4) { return module % 2 == 0; }
      else { return 0; }
    }
    else if (side == NegZ or side == PosZ)
    {
      if (layer <= 2) { return module % 2 == 1; }
      else if (layer == 3) { return module % 2 == 0; }
      else { return 0; }
    }
    else { return 0; }
  }
  else { return 0; }
}

bool SDL::modules::parseIsLower(unsigned int index, bool isInvertedx, unsigned int detId)
{
  return (isInvertedx) ? !(detId & 1) : (detId & 1);
}

bool SDL::modules::parseIsLower(unsigned int index)
{
  return (isInverted[index]) ? !(detIds[index] & 1) : (detIds[index] & 1);
}

unsigned int SDL::modules::partnerModuleIndexExplicit(unsigned int index, bool isLowerx, bool isInvertedx)
{
  /*We need to ensure modules with successive det Ids are right next to each other or we're dead*/
  if(isLowerx)
  {
    return (isInvertedx ? index - 1 : index + 1);
  }
  else
  {
    return (isInvertedx ? index + 1 : index - 1);
  }
}

unsigned int SDL::modules::partnerModuleIndex(unsigned int index)
{
  /*We need to ensure modules with successive det Ids are right next to each other or we're dead*/
  if(isLower[index])
  {
    return (isInverted[index] ? index - 1 : index + 1);
  }
  else
  {
    return (isInverted[index] ? index + 1 : index - 1);
  }
}

//PS vs 2S module classification from the layer/ring indices
SDL::ModuleType SDL::modules::parseModuleType(unsigned int index, short subdet, short layer, short ring)
{
  if(subdet == Barrel)
  {
    if(layer <= 3) { return PS; }
    else { return TwoS; }
  }
  else
  {
    if(layer <= 2)
    {
      if(ring <= 10) { return PS; }
      else { return TwoS; }
    }
    else
    {
      if(ring <= 7) { return PS; }
      else { return TwoS; }
    }
  }
}

SDL::ModuleType SDL::modules::parseModuleType(unsigned int index)
{
  if(subdets[index] == Barrel)
  {
    if(layers[index] <= 3) { return PS; }
    else { return TwoS; }
  }
  else
  {
    if(layers[index] <= 2)
    {
      if(rings[index] <= 10) { return PS; }
      else { return TwoS; }
    }
    else
    {
      if(rings[index] <= 7) { return PS; }
      else { return TwoS; }
    }
  }
}

SDL::ModuleLayerType SDL::modules::parseModuleLayerType(unsigned int index, ModuleType moduleTypex, bool isInvertedx, bool isLowerx)
{
  if(moduleTypex == TwoS)
  {
    return Strip;
  }
  if(isInvertedx)
  {
    if(isLowerx) { return Strip; }
    else { return Pixel; }
  }
  else
  {
    if(isLowerx) { return Pixel; }
    else { return Strip; }
  }
}

SDL::ModuleLayerType SDL::modules::parseModuleLayerType(unsigned int index)
{
  if(moduleType[index] == TwoS)
  {
    return Strip;
  }
  if(isInverted[index])
  {
    if(isLower[index]) { return Strip; }
    else { return Pixel; }
  }
  else
  {
    if(isLower[index]) { return Pixel; }
    else { return Strip; }
  }
}

//reset all per-module object ranges to -1 (i.e. empty)
void SDL::resetObjectRanges(struct modules& modulesInGPU, unsigned int nModules)
{
#ifdef Explicit_Module
  hipMemset(modulesInGPU.hitRanges, -1, nModules*2*sizeof(int));
  hipMemset(modulesInGPU.mdRanges, -1, nModules*2*sizeof(int));
  hipMemset(modulesInGPU.segmentRanges, -1, nModules*2*sizeof(int));
  hipMemset(modulesInGPU.trackletRanges, -1, nModules*2*sizeof(int));
  hipMemset(modulesInGPU.tripletRanges, -1, nModules*2*sizeof(int));
  hipMemset(modulesInGPU.trackCandidateRanges, -1, nModules*2*sizeof(int));
  hipMemset(modulesInGPU.quintupletRanges, -1, nModules*2*sizeof(int));
#else
  #pragma omp parallel for default(shared)
  for(size_t i = 0; i < nModules * 2; i++)
  {
    modulesInGPU.hitRanges[i] = -1;
    modulesInGPU.mdRanges[i] = -1;
    modulesInGPU.segmentRanges[i] = -1;
    modulesInGPU.trackletRanges[i] = -1;
    modulesInGPU.tripletRanges[i] = -1;
    modulesInGPU.trackCandidateRanges[i] = -1;
    modulesInGPU.quintupletRanges[i] = -1;
  }
#endif
}
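// A minimal host-side sketch (not part of the original SDL code) showing how the packed
// detId bit fields decoded by setDerivedQuantities above can be inspected. The numeric
// values kBarrel = 5 and kEndcap = 4 are assumptions inferred from the Subdet5/Subdet4
// naming of the pixel connection maps; the authoritative enum values live in Module.cuh
// and should be preferred over these constants.
#include <cstdio>

namespace sdl_sketch {
  constexpr unsigned short kBarrel = 5;  // assumed value, see note above
  constexpr unsigned short kEndcap = 4;  // assumed value, see note above

  // Mirrors the mask-and-shift logic of SDL::setDerivedQuantities for a single detId.
  inline void printDerivedQuantities(unsigned int detId)
  {
    unsigned short subdet = (detId & (7 << 25)) >> 25;
    unsigned short side   = (subdet == kEndcap) ? (detId & (3 << 23)) >> 23 : (detId & (3 << 18)) >> 18;
    unsigned short layer  = (subdet == kEndcap) ? (detId & (7 << 18)) >> 18 : (detId & (7 << 20)) >> 20;
    unsigned short ring   = (subdet == kEndcap) ? (detId & (15 << 12)) >> 12 : 0;
    unsigned short rod    = (subdet == kEndcap) ? 0 : (detId & (127 << 10)) >> 10;
    unsigned short module = (detId & (127 << 2)) >> 2;
    std::printf("detId %u -> subdet %hu side %hu layer %hu ring %hu rod %hu module %hu\n",
                detId, subdet, side, layer, ring, rod, module);
  }
}  // namespace sdl_sketch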
Module.cu
# include "Module.cuh" #include "ModuleConnectionMap.h" #include "allocate.h" std::map <unsigned int, unsigned int> *SDL::detIdToIndex; void SDL::createModulesInUnifiedMemory(struct modules& modulesInGPU,unsigned int nModules) { /* modules stucture object will be created in Event.cu*/ #ifdef CACHE_ALLOC cudaStream_t stream=0; modulesInGPU.detIds = (unsigned int*)cms::cuda::allocate_managed(nModules * sizeof(unsigned int),stream); modulesInGPU.moduleMap = (unsigned int*)cms::cuda::allocate_managed(nModules * 40 * sizeof(unsigned int),stream); modulesInGPU.nConnectedModules = (unsigned int*)cms::cuda::allocate_managed(nModules * sizeof(unsigned int),stream); modulesInGPU.drdzs = (float*)cms::cuda::allocate_managed(nModules * sizeof(float),stream); modulesInGPU.slopes = (float*)cms::cuda::allocate_managed(nModules * sizeof(float),stream); modulesInGPU.nModules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.nLowerModules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.layers = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.rings = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.modules = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.rods = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.subdets = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.sides = (short*)cms::cuda::allocate_managed(nModules * sizeof(short),stream); modulesInGPU.isInverted = (bool*)cms::cuda::allocate_managed(nModules * sizeof(bool),stream); modulesInGPU.isLower = (bool*)cms::cuda::allocate_managed(nModules * sizeof(bool),stream); modulesInGPU.nEligibleModules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.nEligibleT5Modules = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); modulesInGPU.hitRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.mdRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.segmentRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.trackletRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.tripletRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.trackCandidateRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.quintupletRanges = (int*)cms::cuda::allocate_managed(nModules * 2 * sizeof(int),stream); modulesInGPU.moduleType = (ModuleType*)cms::cuda::allocate_managed(nModules * sizeof(ModuleType),stream); modulesInGPU.moduleLayerType=(ModuleLayerType*)cms::cuda::allocate_managed(nModules * sizeof(ModuleLayerType),stream); #else cudaMallocManaged(&modulesInGPU.detIds,nModules * sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.moduleMap,nModules * 40 * sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.nConnectedModules,nModules * sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.drdzs,nModules * sizeof(float)); cudaMallocManaged(&modulesInGPU.slopes,nModules * sizeof(float)); cudaMallocManaged(&modulesInGPU.nModules,sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.nLowerModules,sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.layers,nModules * sizeof(short)); cudaMallocManaged(&modulesInGPU.rings,nModules * sizeof(short)); 
cudaMallocManaged(&modulesInGPU.modules,nModules * sizeof(short)); cudaMallocManaged(&modulesInGPU.rods,nModules * sizeof(short)); cudaMallocManaged(&modulesInGPU.subdets,nModules * sizeof(short)); cudaMallocManaged(&modulesInGPU.sides,nModules * sizeof(short)); cudaMallocManaged(&modulesInGPU.isInverted, nModules * sizeof(bool)); cudaMallocManaged(&modulesInGPU.isLower, nModules * sizeof(bool)); cudaMallocManaged(&modulesInGPU.nEligibleModules,sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.nEligibleT5Modules, sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.hitRanges,nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.mdRanges,nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.segmentRanges,nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.trackletRanges,nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.tripletRanges,nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.trackCandidateRanges, nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.quintupletRanges, nModules * 2 * sizeof(int)); cudaMallocManaged(&modulesInGPU.moduleType,nModules * sizeof(ModuleType)); cudaMallocManaged(&modulesInGPU.moduleLayerType,nModules * sizeof(ModuleLayerType)); #endif *modulesInGPU.nModules = nModules; } void SDL::createModulesInExplicitMemory(struct modules& modulesInGPU,unsigned int nModules) { /* modules stucture object will be created in Event.cu*/ #ifdef CACHE_ALLOC cudaStream_t stream=0; int dev; cudaGetDevice(&dev); modulesInGPU.detIds = (unsigned int*)cms::cuda::allocate_device(dev,nModules * sizeof(unsigned int),stream); modulesInGPU.moduleMap = (unsigned int*)cms::cuda::allocate_device(dev,nModules * 40 * sizeof(unsigned int),stream); modulesInGPU.nConnectedModules = (unsigned int*)cms::cuda::allocate_device(dev,nModules * sizeof(unsigned int),stream); modulesInGPU.drdzs = (float*)cms::cuda::allocate_device(dev,nModules * sizeof(float),stream); modulesInGPU.slopes = (float*)cms::cuda::allocate_device(dev,nModules * sizeof(float),stream); modulesInGPU.nModules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.nLowerModules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.layers = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.rings = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.modules = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.rods = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.subdets = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.sides = (short*)cms::cuda::allocate_device(dev,nModules * sizeof(short),stream); modulesInGPU.isInverted = (bool*)cms::cuda::allocate_device(dev,nModules * sizeof(bool),stream); modulesInGPU.isLower = (bool*)cms::cuda::allocate_device(dev,nModules * sizeof(bool),stream); modulesInGPU.nEligibleModules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.nEligibleT5Modules = (unsigned int*)cms::cuda::allocate_device(dev,sizeof(unsigned int),stream); modulesInGPU.hitRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.mdRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.segmentRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.trackletRanges = 
(int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.tripletRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.trackCandidateRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.quintupletRanges = (int*)cms::cuda::allocate_device(dev,nModules * 2 * sizeof(int),stream); modulesInGPU.moduleType = (ModuleType*)cms::cuda::allocate_device(dev,nModules * sizeof(ModuleType),stream); modulesInGPU.moduleLayerType= (ModuleLayerType*)cms::cuda::allocate_device(dev,nModules * sizeof(ModuleLayerType),stream); #else cudaMalloc(&(modulesInGPU.detIds),nModules * sizeof(unsigned int)); cudaMalloc(&modulesInGPU.moduleMap,nModules * 40 * sizeof(unsigned int)); cudaMalloc(&modulesInGPU.nConnectedModules,nModules * sizeof(unsigned int)); cudaMalloc(&modulesInGPU.drdzs,nModules * sizeof(float)); cudaMalloc(&modulesInGPU.slopes,nModules * sizeof(float)); cudaMalloc(&modulesInGPU.nModules,sizeof(unsigned int)); cudaMalloc(&modulesInGPU.nLowerModules,sizeof(unsigned int)); cudaMalloc(&modulesInGPU.layers,nModules * sizeof(short)); cudaMalloc(&modulesInGPU.rings,nModules * sizeof(short)); cudaMalloc(&modulesInGPU.modules,nModules * sizeof(short)); cudaMalloc(&modulesInGPU.rods,nModules * sizeof(short)); cudaMalloc(&modulesInGPU.subdets,nModules * sizeof(short)); cudaMalloc(&modulesInGPU.sides,nModules * sizeof(short)); cudaMalloc(&modulesInGPU.isInverted, nModules * sizeof(bool)); cudaMalloc(&modulesInGPU.isLower, nModules * sizeof(bool)); cudaMalloc(&modulesInGPU.nEligibleModules,sizeof(unsigned int)); cudaMalloc(&modulesInGPU.nEligibleT5Modules, sizeof(unsigned int)); cudaMalloc(&modulesInGPU.hitRanges,nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.mdRanges,nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.segmentRanges,nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.trackletRanges,nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.tripletRanges,nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.trackCandidateRanges, nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.quintupletRanges, nModules * 2 * sizeof(int)); cudaMalloc(&modulesInGPU.moduleType,nModules * sizeof(ModuleType)); cudaMalloc(&modulesInGPU.moduleLayerType,nModules * sizeof(ModuleLayerType)); #endif cudaMemcpy(modulesInGPU.nModules,&nModules,sizeof(unsigned int),cudaMemcpyHostToDevice); } void SDL::freeModulesCache(struct modules& modulesInGPU,struct pixelMap& pixelMapping) { #ifdef Explicit_Module int dev; cudaGetDevice(&dev); cms::cuda::free_device(dev,modulesInGPU.detIds); cms::cuda::free_device(dev,modulesInGPU.moduleMap); cms::cuda::free_device(dev,modulesInGPU.nConnectedModules); cms::cuda::free_device(dev,modulesInGPU.drdzs); cms::cuda::free_device(dev,modulesInGPU.slopes); cms::cuda::free_device(dev,modulesInGPU.nModules); cms::cuda::free_device(dev,modulesInGPU.nLowerModules); cms::cuda::free_device(dev,modulesInGPU.layers); cms::cuda::free_device(dev,modulesInGPU.rings); cms::cuda::free_device(dev,modulesInGPU.modules); cms::cuda::free_device(dev,modulesInGPU.rods); cms::cuda::free_device(dev,modulesInGPU.subdets); cms::cuda::free_device(dev,modulesInGPU.sides); cms::cuda::free_device(dev,modulesInGPU.isInverted); cms::cuda::free_device(dev,modulesInGPU.isLower); cms::cuda::free_device(dev,modulesInGPU.hitRanges); cms::cuda::free_device(dev,modulesInGPU.mdRanges); cms::cuda::free_device(dev,modulesInGPU.segmentRanges); cms::cuda::free_device(dev,modulesInGPU.trackletRanges); 
cms::cuda::free_device(dev,modulesInGPU.tripletRanges); cms::cuda::free_device(dev,modulesInGPU.trackCandidateRanges); cms::cuda::free_device(dev,modulesInGPU.quintupletRanges); cms::cuda::free_device(dev,modulesInGPU.moduleType); cms::cuda::free_device(dev,modulesInGPU.moduleLayerType); cms::cuda::free_device(dev,modulesInGPU.lowerModuleIndices); cms::cuda::free_device(dev,modulesInGPU.reverseLookupLowerModuleIndices); cms::cuda::free_device(dev,modulesInGPU.trackCandidateModuleIndices); cms::cuda::free_device(dev,modulesInGPU.quintupletModuleIndices); cms::cuda::free_device(dev,modulesInGPU.nEligibleModules); cms::cuda::free_device(dev,modulesInGPU.nEligibleT5Modules); cms::cuda::free_device(dev,modulesInGPU.connectedPixels); #else cms::cuda::free_managed(modulesInGPU.detIds); cms::cuda::free_managed(modulesInGPU.moduleMap); cms::cuda::free_managed(modulesInGPU.nConnectedModules); cms::cuda::free_managed(modulesInGPU.drdzs); cms::cuda::free_managed(modulesInGPU.slopes); cms::cuda::free_managed(modulesInGPU.nModules); cms::cuda::free_managed(modulesInGPU.nLowerModules); cms::cuda::free_managed(modulesInGPU.layers); cms::cuda::free_managed(modulesInGPU.rings); cms::cuda::free_managed(modulesInGPU.modules); cms::cuda::free_managed(modulesInGPU.rods); cms::cuda::free_managed(modulesInGPU.subdets); cms::cuda::free_managed(modulesInGPU.sides); cms::cuda::free_managed(modulesInGPU.isInverted); cms::cuda::free_managed(modulesInGPU.isLower); cms::cuda::free_managed(modulesInGPU.hitRanges); cms::cuda::free_managed(modulesInGPU.mdRanges); cms::cuda::free_managed(modulesInGPU.segmentRanges); cms::cuda::free_managed(modulesInGPU.trackletRanges); cms::cuda::free_managed(modulesInGPU.tripletRanges); cms::cuda::free_managed(modulesInGPU.trackCandidateRanges); cms::cuda::free_managed(modulesInGPU.quintupletRanges); cms::cuda::free_managed(modulesInGPU.moduleType); cms::cuda::free_managed(modulesInGPU.moduleLayerType); cms::cuda::free_managed(modulesInGPU.lowerModuleIndices); cms::cuda::free_managed(modulesInGPU.reverseLookupLowerModuleIndices); cms::cuda::free_managed(modulesInGPU.trackCandidateModuleIndices); cms::cuda::free_managed(modulesInGPU.quintupletModuleIndices); cms::cuda::free_managed(modulesInGPU.nEligibleModules); cms::cuda::free_managed(modulesInGPU.nEligibleT5Modules); cms::cuda::free_managed(modulesInGPU.connectedPixels); #endif cudaFreeHost(pixelMapping.connectedPixelsSizes); cudaFreeHost(pixelMapping.connectedPixelsSizesPos); cudaFreeHost(pixelMapping.connectedPixelsSizesNeg); cudaFreeHost(pixelMapping.connectedPixelsIndex); cudaFreeHost(pixelMapping.connectedPixelsIndexPos); cudaFreeHost(pixelMapping.connectedPixelsIndexNeg); } void SDL::freeModules(struct modules& modulesInGPU, struct pixelMap& pixelMapping) { cudaFree(modulesInGPU.detIds); cudaFree(modulesInGPU.moduleMap); cudaFree(modulesInGPU.nConnectedModules); cudaFree(modulesInGPU.drdzs); cudaFree(modulesInGPU.slopes); cudaFree(modulesInGPU.nModules); cudaFree(modulesInGPU.nLowerModules); cudaFree(modulesInGPU.layers); cudaFree(modulesInGPU.rings); cudaFree(modulesInGPU.modules); cudaFree(modulesInGPU.rods); cudaFree(modulesInGPU.subdets); cudaFree(modulesInGPU.sides); cudaFree(modulesInGPU.isInverted); cudaFree(modulesInGPU.isLower); cudaFree(modulesInGPU.hitRanges); cudaFree(modulesInGPU.mdRanges); cudaFree(modulesInGPU.segmentRanges); cudaFree(modulesInGPU.trackletRanges); cudaFree(modulesInGPU.tripletRanges); cudaFree(modulesInGPU.trackCandidateRanges); cudaFree(modulesInGPU.quintupletRanges); 
cudaFree(modulesInGPU.moduleType); cudaFree(modulesInGPU.moduleLayerType); cudaFree(modulesInGPU.lowerModuleIndices); cudaFree(modulesInGPU.reverseLookupLowerModuleIndices); cudaFree(modulesInGPU.trackCandidateModuleIndices); cudaFree(modulesInGPU.quintupletModuleIndices); cudaFree(modulesInGPU.nEligibleModules); cudaFree(modulesInGPU.nEligibleT5Modules); cudaFree(modulesInGPU.connectedPixels); cudaFreeHost(pixelMapping.connectedPixelsSizes); cudaFreeHost(pixelMapping.connectedPixelsSizesPos); cudaFreeHost(pixelMapping.connectedPixelsSizesNeg); cudaFreeHost(pixelMapping.connectedPixelsIndex); cudaFreeHost(pixelMapping.connectedPixelsIndexPos); cudaFreeHost(pixelMapping.connectedPixelsIndexNeg); } void SDL::createLowerModuleIndexMapExplicit(struct modules& modulesInGPU, unsigned int nLowerModules, unsigned int nModules,bool* isLower) { //FIXME:some hacks to get the pixel module in the lower modules index without incrementing nLowerModules counter! //Reproduce these hacks in the explicit memory for identical results (or come up with a better method) unsigned int* lowerModuleIndices; int* reverseLookupLowerModuleIndices; cudaMallocHost(&lowerModuleIndices,(nLowerModules + 1) * sizeof(unsigned int)); cudaMallocHost(&reverseLookupLowerModuleIndices,nModules * sizeof(int)); unsigned int lowerModuleCounter = 0; for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int index = it->second; unsigned int detId = it->first; if(isLower[index]) { lowerModuleIndices[lowerModuleCounter] = index; reverseLookupLowerModuleIndices[index] = lowerModuleCounter; lowerModuleCounter++; } else { reverseLookupLowerModuleIndices[index] = -1; } } //hacky stuff "beyond the index" for the pixel module. nLowerModules will *NOT* cover the pixel module! lowerModuleIndices[nLowerModules] = (*detIdToIndex)[1]; reverseLookupLowerModuleIndices[(*detIdToIndex)[1]] = nLowerModules; #ifdef CACHE_ALLOC cudaStream_t stream =0; int dev; cudaGetDevice(&dev); modulesInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_device(dev,(nLowerModules + 1) * sizeof(unsigned int),stream); modulesInGPU.reverseLookupLowerModuleIndices = (int*)cms::cuda::allocate_device(dev,nModules * sizeof(int),stream); modulesInGPU.trackCandidateModuleIndices = (int*)cms::cuda::allocate_device(dev,(nLowerModules + 1) * sizeof(int),stream); modulesInGPU.quintupletModuleIndices = (int*)cms::cuda::allocate_device(dev,nLowerModules * sizeof(int),stream); #else cudaMalloc(&modulesInGPU.lowerModuleIndices,(nLowerModules + 1) * sizeof(unsigned int)); cudaMalloc(&modulesInGPU.reverseLookupLowerModuleIndices,nModules * sizeof(int)); cudaMalloc(&modulesInGPU.trackCandidateModuleIndices, (nLowerModules + 1) * sizeof(int)); cudaMalloc(&modulesInGPU.quintupletModuleIndices, nLowerModules * sizeof(int)); #endif cudaMemcpy(modulesInGPU.lowerModuleIndices,lowerModuleIndices,sizeof(unsigned int)*(nLowerModules+1),cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.reverseLookupLowerModuleIndices,reverseLookupLowerModuleIndices,sizeof(int)*nModules,cudaMemcpyHostToDevice); cudaFreeHost(lowerModuleIndices); cudaFreeHost(reverseLookupLowerModuleIndices); } void SDL::createLowerModuleIndexMap(struct modules& modulesInGPU, unsigned int nLowerModules, unsigned int nModules) { //FIXME:some hacks to get the pixel module in the lower modules index without incrementing nLowerModules counter! 
//Reproduce these hacks in the explicit memory for identical results (or come up with a better method) #ifdef CACHE_ALLOC cudaStream_t stream =0; modulesInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_managed((nLowerModules + 1) * sizeof(unsigned int),stream); modulesInGPU.reverseLookupLowerModuleIndices = (int*)cms::cuda::allocate_managed(nModules * sizeof(int),stream); modulesInGPU.trackCandidateModuleIndices = (int*)cms::cuda::allocate_managed((nLowerModules + 1) * sizeof(int),stream); modulesInGPU.quintupletModuleIndices = (int*)cms::cuda::allocate_managed(nLowerModules * sizeof(int),stream); #else cudaMallocManaged(&modulesInGPU.lowerModuleIndices,(nLowerModules + 1) * sizeof(unsigned int)); cudaMallocManaged(&modulesInGPU.reverseLookupLowerModuleIndices,nModules * sizeof(int)); cudaMallocManaged(&modulesInGPU.trackCandidateModuleIndices, (nLowerModules + 1) * sizeof(int)); cudaMallocManaged(&modulesInGPU.quintupletModuleIndices, nLowerModules * sizeof(int)); #endif unsigned int lowerModuleCounter = 0; for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int index = it->second; unsigned int detId = it->first; if(modulesInGPU.isLower[index]) { modulesInGPU.lowerModuleIndices[lowerModuleCounter] = index; modulesInGPU.reverseLookupLowerModuleIndices[index] = lowerModuleCounter; lowerModuleCounter++; } else { modulesInGPU.reverseLookupLowerModuleIndices[index] = -1; } } //hacky stuff "beyond the index" for the pixel module. nLowerModules will *NOT* cover the pixel module! modulesInGPU.lowerModuleIndices[nLowerModules] = (*detIdToIndex)[1]; modulesInGPU.reverseLookupLowerModuleIndices[(*detIdToIndex)[1]] = nLowerModules; } void SDL::loadModulesFromFile(struct modules& modulesInGPU, unsigned int& nModules, struct pixelMap& pixelMapping, const char* moduleMetaDataFilePath) { detIdToIndex = new std::map<unsigned int, unsigned int>; /*modules structure object will be created in Event.cu*/ /* Load the whole text file into the unordered_map first*/ std::ifstream ifile; ifile.open(moduleMetaDataFilePath); if(!ifile.is_open()) { std::cout<<"ERROR! 
module list file not present!"<<std::endl; } std::string line; unsigned int counter = 0; while(std::getline(ifile,line)) { std::stringstream ss(line); std::string token; bool flag = 0; while(std::getline(ss,token,',')) { if(flag == 1) break; (*detIdToIndex)[stoi(token)] = counter; flag = 1; counter++; } } (*detIdToIndex)[1] = counter; //pixel module is the last module in the module list counter++; nModules = counter; std::cout<<"Number of modules = "<<nModules<<std::endl; #ifdef Explicit_Module createModulesInExplicitMemory(modulesInGPU,nModules); unsigned int* lowerModuleCounter;// = 0; cudaMallocHost(&lowerModuleCounter,sizeof(unsigned int)); cudaMemset(lowerModuleCounter,0,sizeof(unsigned int)); unsigned int* host_detIds; short* host_layers; short* host_rings; short* host_rods; short* host_modules; short* host_subdets; short* host_sides; bool* host_isInverted; bool* host_isLower; ModuleType* host_moduleType; ModuleLayerType* host_moduleLayerType; float* host_slopes; float* host_drdzs; cudaMallocHost(&host_detIds,sizeof(unsigned int)*nModules); cudaMallocHost(&host_layers,sizeof(short)*nModules); cudaMallocHost(&host_rings,sizeof(short)*nModules); cudaMallocHost(&host_rods,sizeof(short)*nModules); cudaMallocHost(&host_modules,sizeof(short)*nModules); cudaMallocHost(&host_subdets,sizeof(short)*nModules); cudaMallocHost(&host_sides,sizeof(short)*nModules); cudaMallocHost(&host_isInverted,sizeof(bool)*nModules); cudaMallocHost(&host_isLower,sizeof(bool)*nModules); cudaMallocHost(&host_moduleType,sizeof(ModuleType)*nModules); cudaMallocHost(&host_moduleLayerType,sizeof(ModuleLayerType)*nModules); cudaMallocHost(&host_slopes,sizeof(float)*nModules); cudaMallocHost(&host_drdzs,sizeof(float)*nModules); for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int detId = it->first; unsigned int index = it->second; host_detIds[index] = detId; if(detId == 1) { host_layers[index] = 0; host_rings[index] = 0; host_rods[index] = 0; host_modules[index] = 0; host_subdets[index] = SDL::InnerPixel; host_sides[index] = 0; host_isInverted[index] = 0; host_isLower[index] = false; host_moduleType[index] = PixelModule; host_moduleLayerType[index] = SDL::InnerPixelLayer; host_slopes[index] = 0; host_drdzs[index] = 0; } else { unsigned short layer,ring,rod,module,subdet,side; setDerivedQuantities(detId,layer,ring,rod,module,subdet,side); host_layers[index] = layer; host_rings[index] = ring; host_rods[index] = rod; host_modules[index] = module; host_subdets[index] = subdet; host_sides[index] = side; host_isInverted[index] = modulesInGPU.parseIsInverted(index,subdet, side,module,layer); host_isLower[index] = modulesInGPU.parseIsLower(index, host_isInverted[index], detId); host_moduleType[index] = modulesInGPU.parseModuleType(index, subdet, layer, ring); host_moduleLayerType[index] = modulesInGPU.parseModuleLayerType(index, host_moduleType[index],host_isInverted[index],host_isLower[index]); host_slopes[index] = (subdet == Endcap) ? endcapGeometry.getSlopeLower(detId) : tiltedGeometry.getSlope(detId); host_drdzs[index] = (subdet == Barrel) ? 
tiltedGeometry.getDrDz(detId) : 0; } lowerModuleCounter[0] += host_isLower[index]; } cudaMemcpy(modulesInGPU.nLowerModules,lowerModuleCounter,sizeof(unsigned int),cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.detIds,host_detIds,nModules*sizeof(unsigned int),cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.layers,host_layers,nModules*sizeof(short),cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.rings,host_rings,sizeof(short)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.rods,host_rods,sizeof(short)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.modules,host_modules,sizeof(short)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.subdets,host_subdets,sizeof(short)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.sides,host_sides,sizeof(short)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.isInverted,host_isInverted,sizeof(bool)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.isLower,host_isLower,sizeof(bool)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.moduleType,host_moduleType,sizeof(ModuleType)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.moduleLayerType,host_moduleLayerType,sizeof(ModuleLayerType)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.slopes,host_slopes,sizeof(float)*nModules,cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.drdzs,host_drdzs,sizeof(float)*nModules,cudaMemcpyHostToDevice); cudaFreeHost(host_detIds); cudaFreeHost(host_layers); cudaFreeHost(host_rings); cudaFreeHost(host_rods); cudaFreeHost(host_modules); cudaFreeHost(host_subdets); cudaFreeHost(host_sides); cudaFreeHost(host_isInverted); cudaFreeHost(host_isLower); cudaFreeHost(host_moduleType); cudaFreeHost(host_moduleLayerType); cudaFreeHost(host_slopes); cudaFreeHost(host_drdzs); cudaFreeHost(lowerModuleCounter); std::cout<<"number of lower modules (without fake pixel module)= "<<lowerModuleCounter[0]<<std::endl; createLowerModuleIndexMapExplicit(modulesInGPU,lowerModuleCounter[0], nModules,host_isLower); fillConnectedModuleArrayExplicit(modulesInGPU,nModules); fillPixelMap(modulesInGPU,pixelMapping); resetObjectRanges(modulesInGPU,nModules); #else createModulesInUnifiedMemory(modulesInGPU,nModules); unsigned int lowerModuleCounter = 0; for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); it++) { unsigned int detId = it->first; unsigned int index = it->second; modulesInGPU.detIds[index] = detId; if(detId == 1) { modulesInGPU.layers[index] = 0; modulesInGPU.rings[index] = 0; modulesInGPU.rods[index] = 0; modulesInGPU.modules[index] = 0; modulesInGPU.subdets[index] = SDL::InnerPixel; modulesInGPU.sides[index] = 0; modulesInGPU.isInverted[index] = 0; modulesInGPU.isLower[index] = false; modulesInGPU.moduleType[index] = PixelModule; modulesInGPU.moduleLayerType[index] = SDL::InnerPixelLayer; modulesInGPU.slopes[index] = 0; modulesInGPU.drdzs[index] = 0; } else { unsigned short layer,ring,rod,module,subdet,side; setDerivedQuantities(detId,layer,ring,rod,module,subdet,side); modulesInGPU.layers[index] = layer; modulesInGPU.rings[index] = ring; modulesInGPU.rods[index] = rod; modulesInGPU.modules[index] = module; modulesInGPU.subdets[index] = subdet; modulesInGPU.sides[index] = side; modulesInGPU.isInverted[index] = modulesInGPU.parseIsInverted(index); modulesInGPU.isLower[index] = modulesInGPU.parseIsLower(index); modulesInGPU.moduleType[index] = modulesInGPU.parseModuleType(index); modulesInGPU.moduleLayerType[index] = modulesInGPU.parseModuleLayerType(index); modulesInGPU.slopes[index] = (subdet == 
Endcap) ? endcapGeometry.getSlopeLower(detId) : tiltedGeometry.getSlope(detId); modulesInGPU.drdzs[index] = (subdet == Barrel) ? tiltedGeometry.getDrDz(detId) : 0; } if(modulesInGPU.isLower[index]) lowerModuleCounter++; } *modulesInGPU.nLowerModules = lowerModuleCounter; std::cout<<"number of lower modules (without fake pixel module)= "<<*modulesInGPU.nLowerModules<<std::endl; createLowerModuleIndexMap(modulesInGPU,lowerModuleCounter, nModules); fillConnectedModuleArray(modulesInGPU,nModules); fillPixelMap(modulesInGPU,pixelMapping); resetObjectRanges(modulesInGPU,nModules); #endif } void SDL::fillPixelMap(struct modules& modulesInGPU, struct pixelMap& pixelMapping) { int size_superbins = 45000;//SDL::moduleConnectionMap_pLStoLayer1Subdet5.size(); //changed to 45000 to reduce memory useage on GPU std::vector<unsigned int> connectedModuleDetIds; std::vector<unsigned int> connectedModuleDetIds_pos; std::vector<unsigned int> connectedModuleDetIds_neg; unsigned int* connectedPixelsIndex; unsigned int* connectedPixelsIndexPos; unsigned int* connectedPixelsIndexNeg; unsigned int* connectedPixelsSizes; unsigned int* connectedPixelsSizesPos; unsigned int* connectedPixelsSizesNeg; cudaMallocHost(&pixelMapping.connectedPixelsIndex,size_superbins * sizeof(unsigned int)); cudaMallocHost(&pixelMapping.connectedPixelsSizes,size_superbins * sizeof(unsigned int)); cudaMallocHost(&pixelMapping.connectedPixelsIndexPos,size_superbins * sizeof(unsigned int)); cudaMallocHost(&pixelMapping.connectedPixelsSizesPos,size_superbins * sizeof(unsigned int)); cudaMallocHost(&pixelMapping.connectedPixelsIndexNeg,size_superbins * sizeof(unsigned int)); cudaMallocHost(&pixelMapping.connectedPixelsSizesNeg,size_superbins * sizeof(unsigned int)); int totalSizes=0; int totalSizes_pos=0; int totalSizes_neg=0; for(int isuperbin =0; isuperbin<size_superbins; isuperbin++) { std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet5 = SDL::moduleConnectionMap_pLStoLayer1Subdet5.getConnectedModuleDetIds(isuperbin+size_superbins);// index adjustment to get high values std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet5 = SDL::moduleConnectionMap_pLStoLayer2Subdet5.getConnectedModuleDetIds(isuperbin+size_superbins);// from the high pt bins std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet5 = SDL::moduleConnectionMap_pLStoLayer3Subdet5.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet4 = SDL::moduleConnectionMap_pLStoLayer1Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet4 = SDL::moduleConnectionMap_pLStoLayer2Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet4 = SDL::moduleConnectionMap_pLStoLayer3Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer4Subdet4 = SDL::moduleConnectionMap_pLStoLayer4Subdet4.getConnectedModuleDetIds(isuperbin+size_superbins); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer1Subdet5.begin(),connectedModuleDetIds_pLStoLayer1Subdet5.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer2Subdet5.begin(),connectedModuleDetIds_pLStoLayer2Subdet5.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer3Subdet5.begin(),connectedModuleDetIds_pLStoLayer3Subdet5.end()); 
connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer1Subdet4.begin(),connectedModuleDetIds_pLStoLayer1Subdet4.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer2Subdet4.begin(),connectedModuleDetIds_pLStoLayer2Subdet4.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer3Subdet4.begin(),connectedModuleDetIds_pLStoLayer3Subdet4.end()); connectedModuleDetIds.insert(connectedModuleDetIds.end(),connectedModuleDetIds_pLStoLayer4Subdet4.begin(),connectedModuleDetIds_pLStoLayer4Subdet4.end()); int sizes =0; sizes += connectedModuleDetIds_pLStoLayer1Subdet5.size(); sizes += connectedModuleDetIds_pLStoLayer2Subdet5.size(); sizes += connectedModuleDetIds_pLStoLayer3Subdet5.size(); sizes += connectedModuleDetIds_pLStoLayer1Subdet4.size(); sizes += connectedModuleDetIds_pLStoLayer2Subdet4.size(); sizes += connectedModuleDetIds_pLStoLayer3Subdet4.size(); sizes += connectedModuleDetIds_pLStoLayer4Subdet4.size(); pixelMapping.connectedPixelsIndex[isuperbin] = totalSizes; pixelMapping.connectedPixelsSizes[isuperbin] = sizes; totalSizes += sizes; std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet5_pos = SDL::moduleConnectionMap_pLStoLayer1Subdet5_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet5_pos = SDL::moduleConnectionMap_pLStoLayer2Subdet5_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet5_pos = SDL::moduleConnectionMap_pLStoLayer3Subdet5_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer1Subdet4_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer2Subdet4_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer3Subdet4_pos.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer4Subdet4_pos = SDL::moduleConnectionMap_pLStoLayer4Subdet4_pos.getConnectedModuleDetIds(isuperbin); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer1Subdet5_pos.begin(),connectedModuleDetIds_pLStoLayer1Subdet5_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer2Subdet5_pos.begin(),connectedModuleDetIds_pLStoLayer2Subdet5_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer3Subdet5_pos.begin(),connectedModuleDetIds_pLStoLayer3Subdet5_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer1Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer1Subdet4_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer2Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer2Subdet4_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer3Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer3Subdet4_pos.end()); connectedModuleDetIds_pos.insert(connectedModuleDetIds_pos.end(),connectedModuleDetIds_pLStoLayer4Subdet4_pos.begin(),connectedModuleDetIds_pLStoLayer4Subdet4_pos.end()); int sizes_pos =0; sizes_pos += 
connectedModuleDetIds_pLStoLayer1Subdet5_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer2Subdet5_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer3Subdet5_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer1Subdet4_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer2Subdet4_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer3Subdet4_pos.size(); sizes_pos += connectedModuleDetIds_pLStoLayer4Subdet4_pos.size(); pixelMapping.connectedPixelsIndexPos[isuperbin] = totalSizes_pos; pixelMapping.connectedPixelsSizesPos[isuperbin] = sizes_pos; totalSizes_pos += sizes_pos; std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet5_neg = SDL::moduleConnectionMap_pLStoLayer1Subdet5_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet5_neg = SDL::moduleConnectionMap_pLStoLayer2Subdet5_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet5_neg = SDL::moduleConnectionMap_pLStoLayer3Subdet5_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer1Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer1Subdet4_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer2Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer2Subdet4_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer3Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer3Subdet4_neg.getConnectedModuleDetIds(isuperbin); std::vector<unsigned int> connectedModuleDetIds_pLStoLayer4Subdet4_neg = SDL::moduleConnectionMap_pLStoLayer4Subdet4_neg.getConnectedModuleDetIds(isuperbin); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer1Subdet5_neg.begin(),connectedModuleDetIds_pLStoLayer1Subdet5_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer2Subdet5_neg.begin(),connectedModuleDetIds_pLStoLayer2Subdet5_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer3Subdet5_neg.begin(),connectedModuleDetIds_pLStoLayer3Subdet5_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer1Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer1Subdet4_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer2Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer2Subdet4_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer3Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer3Subdet4_neg.end()); connectedModuleDetIds_neg.insert(connectedModuleDetIds_neg.end(),connectedModuleDetIds_pLStoLayer4Subdet4_neg.begin(),connectedModuleDetIds_pLStoLayer4Subdet4_neg.end()); int sizes_neg =0; sizes_neg += connectedModuleDetIds_pLStoLayer1Subdet5_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer2Subdet5_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer3Subdet5_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer1Subdet4_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer2Subdet4_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer3Subdet4_neg.size(); sizes_neg += connectedModuleDetIds_pLStoLayer4Subdet4_neg.size(); pixelMapping.connectedPixelsIndexNeg[isuperbin] = totalSizes_neg; pixelMapping.connectedPixelsSizesNeg[isuperbin] = sizes_neg; totalSizes_neg += sizes_neg; } unsigned 
int* connectedPixels; cudaMallocHost(&connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg) * sizeof(unsigned int)); #ifdef CACHE_ALLOC cudaStream_t stream=0; #ifdef Explicit_Module int dev; cudaGetDevice(&dev); modulesInGPU.connectedPixels = (unsigned int*)cms::cuda::allocate_device(dev,(totalSizes+totalSizes_pos+totalSizes_neg) * sizeof(unsigned int),stream); #else modulesInGPU.connectedPixels = (unsigned int*)cms::cuda::allocate_managed((totalSizes+totalSizes_pos+totalSizes_neg) * sizeof(unsigned int),stream); #endif #else #ifdef Explicit_Module cudaMalloc(&modulesInGPU.connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg)* sizeof(unsigned int)); #else cudaMallocManaged(&modulesInGPU.connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg)* sizeof(unsigned int)); #endif #endif for(int icondet=0; icondet< totalSizes; icondet++){ connectedPixels[icondet] = (*detIdToIndex)[connectedModuleDetIds[icondet]]; } for(int icondet=0; icondet< totalSizes_pos; icondet++){ connectedPixels[icondet+totalSizes] = (*detIdToIndex)[connectedModuleDetIds_pos[icondet]]; } for(int icondet=0; icondet< totalSizes_neg; icondet++){ connectedPixels[icondet+totalSizes+totalSizes_pos] = (*detIdToIndex)[connectedModuleDetIds_neg[icondet]]; } cudaMemcpy(modulesInGPU.connectedPixels,connectedPixels,(totalSizes+totalSizes_pos+totalSizes_neg)*sizeof(unsigned int),cudaMemcpyHostToDevice); cudaFreeHost(connectedPixels); } void SDL::fillConnectedModuleArrayExplicit(struct modules& modulesInGPU, unsigned int nModules) { unsigned int* moduleMap; unsigned int* nConnectedModules; cudaMallocHost(&moduleMap,nModules * 40 * sizeof(unsigned int)); cudaMallocHost(&nConnectedModules,nModules * sizeof(unsigned int)); for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); ++it) { unsigned int detId = it->first; unsigned int index = it->second; auto& connectedModules = moduleConnectionMap.getConnectedModuleDetIds(detId); nConnectedModules[index] = connectedModules.size(); for(unsigned int i = 0; i< nConnectedModules[index];i++) { moduleMap[index * 40 + i] = (*detIdToIndex)[connectedModules[i]]; } } cudaMemcpy(modulesInGPU.moduleMap,moduleMap,nModules*40*sizeof(unsigned int),cudaMemcpyHostToDevice); cudaMemcpy(modulesInGPU.nConnectedModules,nConnectedModules,nModules*sizeof(unsigned int),cudaMemcpyHostToDevice); cudaFreeHost(moduleMap); cudaFreeHost(nConnectedModules); } void SDL::fillConnectedModuleArray(struct modules& modulesInGPU, unsigned int nModules) { for(auto it = (*detIdToIndex).begin(); it != (*detIdToIndex).end(); ++it) { unsigned int detId = it->first; unsigned int index = it->second; auto& connectedModules = moduleConnectionMap.getConnectedModuleDetIds(detId); modulesInGPU.nConnectedModules[index] = connectedModules.size(); for(unsigned int i = 0; i< modulesInGPU.nConnectedModules[index];i++) { modulesInGPU.moduleMap[index * 40 + i] = (*detIdToIndex)[connectedModules[i]]; } } } void SDL::setDerivedQuantities(unsigned int detId, unsigned short& layer, unsigned short& ring, unsigned short& rod, unsigned short& module, unsigned short& subdet, unsigned short& side) { subdet = (detId & (7 << 25)) >> 25; side = (subdet == Endcap) ? (detId & (3 << 23)) >> 23 : (detId & (3 << 18)) >> 18; layer = (subdet == Endcap) ? (detId & (7 << 18)) >> 18 : (detId & (7 << 20)) >> 20; ring = (subdet == Endcap) ? (detId & (15 << 12)) >> 12 : 0; module = (detId & (127 << 2)) >> 2; rod = (subdet == Endcap) ? 
        0 : (detId & (127 << 10)) >> 10;
}

//auxiliary functions - will be called as needed
bool SDL::modules::parseIsInverted(unsigned int index)
{
    if (subdets[index] == Endcap)
    {
        if (sides[index] == NegZ) { return modules[index] % 2 == 1; }
        else if (sides[index] == PosZ) { return modules[index] % 2 == 0; }
        else { return 0; }
    }
    else if (subdets[index] == Barrel)
    {
        if (sides[index] == Center)
        {
            if (layers[index] <= 3) { return modules[index] % 2 == 1; }
            else if (layers[index] >= 4) { return modules[index] % 2 == 0; }
            else { return 0; }
        }
        else if (sides[index] == NegZ or sides[index] == PosZ)
        {
            if (layers[index] <= 2) { return modules[index] % 2 == 1; }
            else if (layers[index] == 3) { return modules[index] % 2 == 0; }
            else { return 0; }
        }
        else { return 0; }
    }
    else { return 0; }
}

bool SDL::modules::parseIsInverted(unsigned int index, short subdet, short side, short module, short layer)
{
    if (subdet == Endcap)
    {
        if (side == NegZ) { return module % 2 == 1; }
        else if (side == PosZ) { return module % 2 == 0; }
        else { return 0; }
    }
    else if (subdet == Barrel)
    {
        if (side == Center)
        {
            if (layer <= 3) { return module % 2 == 1; }
            else if (layer >= 4) { return module % 2 == 0; }
            else { return 0; }
        }
        else if (side == NegZ or side == PosZ)
        {
            if (layer <= 2) { return module % 2 == 1; }
            else if (layer == 3) { return module % 2 == 0; }
            else { return 0; }
        }
        else { return 0; }
    }
    else { return 0; }
}

bool SDL::modules::parseIsLower(unsigned int index, bool isInvertedx, unsigned int detId)
{
    return (isInvertedx) ? !(detId & 1) : (detId & 1);
}

bool SDL::modules::parseIsLower(unsigned int index)
{
    return (isInverted[index]) ? !(detIds[index] & 1) : (detIds[index] & 1);
}

unsigned int SDL::modules::partnerModuleIndexExplicit(unsigned int index, bool isLowerx, bool isInvertedx)
{
    /*We need to ensure modules with successive det Ids are right next to each other or we're dead*/
    if(isLowerx)
    {
        return (isInvertedx ? index - 1 : index + 1);
    }
    else
    {
        return (isInvertedx ? index + 1 : index - 1);
    }
}

unsigned int SDL::modules::partnerModuleIndex(unsigned int index)
{
    /*We need to ensure modules with successive det Ids are right next to each other or we're dead*/
    if(isLower[index])
    {
        return (isInverted[index] ? index - 1 : index + 1);
    }
    else
    {
        return (isInverted[index] ? index + 1 : index - 1);
    }
}

SDL::ModuleType SDL::modules::parseModuleType(unsigned int index, short subdet, short layer, short ring)
{
    if(subdet == Barrel)
    {
        if(layer <= 3) { return PS; }
        else { return TwoS; }
    }
    else
    {
        if(layer <= 2)
        {
            if(ring <= 10) { return PS; }
            else { return TwoS; }
        }
        else
        {
            if(ring <= 7) { return PS; }
            else { return TwoS; }
        }
    }
}

SDL::ModuleType SDL::modules::parseModuleType(unsigned int index)
{
    if(subdets[index] == Barrel)
    {
        if(layers[index] <= 3) { return PS; }
        else { return TwoS; }
    }
    else
    {
        if(layers[index] <= 2)
        {
            if(rings[index] <= 10) { return PS; }
            else { return TwoS; }
        }
        else
        {
            if(rings[index] <= 7) { return PS; }
            else { return TwoS; }
        }
    }
}

SDL::ModuleLayerType SDL::modules::parseModuleLayerType(unsigned int index, ModuleType moduleTypex, bool isInvertedx, bool isLowerx)
{
    if(moduleTypex == TwoS) { return Strip; }
    if(isInvertedx)
    {
        if(isLowerx) { return Strip; }
        else { return Pixel; }
    }
    else
    {
        if(isLowerx) { return Pixel; }
        else { return Strip; }
    }
}

SDL::ModuleLayerType SDL::modules::parseModuleLayerType(unsigned int index)
{
    if(moduleType[index] == TwoS) { return Strip; }
    if(isInverted[index])
    {
        if(isLower[index]) { return Strip; }
        else { return Pixel; }
    }
    else
    {
        if(isLower[index]) { return Pixel; }
        else { return Strip; }
    }
}

void SDL::resetObjectRanges(struct modules& modulesInGPU, unsigned int nModules)
{
#ifdef Explicit_Module
    cudaMemset(modulesInGPU.hitRanges, -1, nModules*2*sizeof(int));
    cudaMemset(modulesInGPU.mdRanges, -1, nModules*2*sizeof(int));
    cudaMemset(modulesInGPU.segmentRanges, -1, nModules*2*sizeof(int));
    cudaMemset(modulesInGPU.trackletRanges, -1, nModules*2*sizeof(int));
    cudaMemset(modulesInGPU.tripletRanges, -1, nModules*2*sizeof(int));
    cudaMemset(modulesInGPU.trackCandidateRanges, -1, nModules*2*sizeof(int));
    cudaMemset(modulesInGPU.quintupletRanges, -1, nModules*2*sizeof(int));
#else
    #pragma omp parallel for default(shared)
    for(size_t i = 0; i < nModules*2; i++)
    {
        modulesInGPU.hitRanges[i] = -1;
        modulesInGPU.mdRanges[i] = -1;
        modulesInGPU.segmentRanges[i] = -1;
        modulesInGPU.trackletRanges[i] = -1;
        modulesInGPU.tripletRanges[i] = -1;
        modulesInGPU.trackCandidateRanges[i] = -1;
        modulesInGPU.quintupletRanges[i] = -1;
    }
#endif
}
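// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a small host-only
// helper showing the same mask-and-shift decoding that setDerivedQuantities
// performs above. The helper name, the bool parameter standing in for the
// `subdet == Endcap` comparison, and the placeholder `someDetId` in the usage
// comment are assumptions added purely for illustration.
#include <cstdio>

static void decodeDetIdSketch(unsigned int detId, bool isEndcap)
{
    unsigned short subdet = (detId & (7 << 25)) >> 25;   // bits 25-27
    unsigned short side   = isEndcap ? (detId & (3 << 23)) >> 23 : (detId & (3 << 18)) >> 18;
    unsigned short layer  = isEndcap ? (detId & (7 << 18)) >> 18 : (detId & (7 << 20)) >> 20;
    unsigned short ring   = isEndcap ? (detId & (15 << 12)) >> 12 : 0;
    unsigned short module = (detId & (127 << 2)) >> 2;
    unsigned short rod    = isEndcap ? 0 : (detId & (127 << 10)) >> 10;
    std::printf("subdet=%d side=%d layer=%d ring=%d rod=%d module=%d\n",
                subdet, side, layer, ring, rod, module);
}
// Usage sketch: decodeDetIdSketch(someDetId, /*isEndcap=*/false);
// ---------------------------------------------------------------------------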
b8668d694fe871ef3a7e1a74697add122e7909da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_r2; int xdim0_update_halo_kernel1_r2_h = -1; __constant__ int ydim0_update_halo_kernel1_r2; int ydim0_update_halo_kernel1_r2_h = -1; __constant__ int xdim1_update_halo_kernel1_r2; int xdim1_update_halo_kernel1_r2_h = -1; __constant__ int ydim1_update_halo_kernel1_r2; int ydim1_update_halo_kernel1_r2_h = -1; __constant__ int xdim2_update_halo_kernel1_r2; int xdim2_update_halo_kernel1_r2_h = -1; __constant__ int ydim2_update_halo_kernel1_r2; int ydim2_update_halo_kernel1_r2_h = -1; __constant__ int xdim3_update_halo_kernel1_r2; int xdim3_update_halo_kernel1_r2_h = -1; __constant__ int ydim3_update_halo_kernel1_r2; int ydim3_update_halo_kernel1_r2_h = -1; __constant__ int xdim4_update_halo_kernel1_r2; int xdim4_update_halo_kernel1_r2_h = -1; __constant__ int ydim4_update_halo_kernel1_r2; int ydim4_update_halo_kernel1_r2_h = -1; __constant__ int xdim5_update_halo_kernel1_r2; int xdim5_update_halo_kernel1_r2_h = -1; __constant__ int ydim5_update_halo_kernel1_r2; int ydim5_update_halo_kernel1_r2_h = -1; __constant__ int xdim6_update_halo_kernel1_r2; int xdim6_update_halo_kernel1_r2_h = -1; __constant__ int ydim6_update_halo_kernel1_r2; int ydim6_update_halo_kernel1_r2_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_r2*(y)+xdim0_update_halo_kernel1_r2*ydim0_update_halo_kernel1_r2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_r2*(y)+xdim1_update_halo_kernel1_r2*ydim1_update_halo_kernel1_r2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_r2*(y)+xdim2_update_halo_kernel1_r2*ydim2_update_halo_kernel1_r2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_r2*(y)+xdim3_update_halo_kernel1_r2*ydim3_update_halo_kernel1_r2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_r2*(y)+xdim4_update_halo_kernel1_r2*ydim4_update_halo_kernel1_r2*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_r2*(y)+xdim5_update_halo_kernel1_r2*ydim5_update_halo_kernel1_r2*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_r2*(y)+xdim6_update_halo_kernel1_r2*ydim6_update_halo_kernel1_r2*(z)) //user function __device__ inline void update_halo_kernel1_r2(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed , const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(-3,0,0)]; if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(-3,0,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(-3,0,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(-3,0,0)]; if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(-3,0,0)]; if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(-3,0,0)]; if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(-3,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_r2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = 
blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel1_r2 + idx_z * 1 * xdim0_update_halo_kernel1_r2 * ydim0_update_halo_kernel1_r2; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel1_r2 + idx_z * 1 * xdim1_update_halo_kernel1_r2 * ydim1_update_halo_kernel1_r2; arg2 += idx_x * 1 + idx_y * 1 * xdim2_update_halo_kernel1_r2 + idx_z * 1 * xdim2_update_halo_kernel1_r2 * ydim2_update_halo_kernel1_r2; arg3 += idx_x * 1 + idx_y * 1 * xdim3_update_halo_kernel1_r2 + idx_z * 1 * xdim3_update_halo_kernel1_r2 * ydim3_update_halo_kernel1_r2; arg4 += idx_x * 1 + idx_y * 1 * xdim4_update_halo_kernel1_r2 + idx_z * 1 * xdim4_update_halo_kernel1_r2 * ydim4_update_halo_kernel1_r2; arg5 += idx_x * 1 + idx_y * 1 * xdim5_update_halo_kernel1_r2 + idx_z * 1 * xdim5_update_halo_kernel1_r2 * ydim5_update_halo_kernel1_r2; arg6 += idx_x * 1 + idx_y * 1 * xdim6_update_halo_kernel1_r2 + idx_z * 1 * xdim6_update_halo_kernel1_r2 * ydim6_update_halo_kernel1_r2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_r2(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_update_halo_kernel1_r2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; ops_timing_realloc(47,"update_halo_kernel1_r2"); OPS_kernels[47].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel1_r2_h || ydim0 != ydim0_update_halo_kernel1_r2_h || xdim1 != xdim1_update_halo_kernel1_r2_h || ydim1 != ydim1_update_halo_kernel1_r2_h || xdim2 != xdim2_update_halo_kernel1_r2_h || ydim2 != ydim2_update_halo_kernel1_r2_h || xdim3 != xdim3_update_halo_kernel1_r2_h || ydim3 != ydim3_update_halo_kernel1_r2_h || xdim4 != xdim4_update_halo_kernel1_r2_h || ydim4 != ydim4_update_halo_kernel1_r2_h || xdim5 != xdim5_update_halo_kernel1_r2_h || 
ydim5 != ydim5_update_halo_kernel1_r2_h || xdim6 != xdim6_update_halo_kernel1_r2_h || ydim6 != ydim6_update_halo_kernel1_r2_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel1_r2, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_r2_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel1_r2, &ydim0, sizeof(int) ); ydim0_update_halo_kernel1_r2_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel1_r2, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_r2_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel1_r2, &ydim1, sizeof(int) ); ydim1_update_halo_kernel1_r2_h = ydim1; hipMemcpyToSymbol( xdim2_update_halo_kernel1_r2, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_r2_h = xdim2; hipMemcpyToSymbol( ydim2_update_halo_kernel1_r2, &ydim2, sizeof(int) ); ydim2_update_halo_kernel1_r2_h = ydim2; hipMemcpyToSymbol( xdim3_update_halo_kernel1_r2, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_r2_h = xdim3; hipMemcpyToSymbol( ydim3_update_halo_kernel1_r2, &ydim3, sizeof(int) ); ydim3_update_halo_kernel1_r2_h = ydim3; hipMemcpyToSymbol( xdim4_update_halo_kernel1_r2, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_r2_h = xdim4; hipMemcpyToSymbol( ydim4_update_halo_kernel1_r2, &ydim4, sizeof(int) ); ydim4_update_halo_kernel1_r2_h = ydim4; hipMemcpyToSymbol( xdim5_update_halo_kernel1_r2, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_r2_h = xdim5; hipMemcpyToSymbol( ydim5_update_halo_kernel1_r2, &ydim5, sizeof(int) ); ydim5_update_halo_kernel1_r2_h = ydim5; hipMemcpyToSymbol( xdim6_update_halo_kernel1_r2, &xdim6, sizeof(int) ); xdim6_update_halo_kernel1_r2_h = xdim6; hipMemcpyToSymbol( ydim6_update_halo_kernel1_r2, &ydim6, sizeof(int) ); ydim6_update_halo_kernel1_r2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[8]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - 
args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); ops_timers_core(&c1,&t1); OPS_kernels[47].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel1_r2), dim3(grid), dim3(tblock) , 0, 0, (double 
*)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[47].time += t2-t1; ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); //Update kernel record OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg6); }
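// ---------------------------------------------------------------------------
// Note added for readability (not part of the generated file): the .cu
// version of this kernel that follows differs mainly in the runtime API
// prefix (hip* vs cuda*) and in the launch syntax. Sketch of the
// correspondence, using a hypothetical kernel `k`:
//
//   CUDA: k<<<grid, tblock, sharedMemBytes, stream>>>(args...);
//   HIP : hipLaunchKernelGGL(k, grid, tblock, sharedMemBytes, stream, args...);
//
// which is how hipify rewrote the ops_update_halo_kernel1_r2 launch above
// (with sharedMemBytes = 0 and the default stream).
// ---------------------------------------------------------------------------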
b8668d694fe871ef3a7e1a74697add122e7909da.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_r2; int xdim0_update_halo_kernel1_r2_h = -1; __constant__ int ydim0_update_halo_kernel1_r2; int ydim0_update_halo_kernel1_r2_h = -1; __constant__ int xdim1_update_halo_kernel1_r2; int xdim1_update_halo_kernel1_r2_h = -1; __constant__ int ydim1_update_halo_kernel1_r2; int ydim1_update_halo_kernel1_r2_h = -1; __constant__ int xdim2_update_halo_kernel1_r2; int xdim2_update_halo_kernel1_r2_h = -1; __constant__ int ydim2_update_halo_kernel1_r2; int ydim2_update_halo_kernel1_r2_h = -1; __constant__ int xdim3_update_halo_kernel1_r2; int xdim3_update_halo_kernel1_r2_h = -1; __constant__ int ydim3_update_halo_kernel1_r2; int ydim3_update_halo_kernel1_r2_h = -1; __constant__ int xdim4_update_halo_kernel1_r2; int xdim4_update_halo_kernel1_r2_h = -1; __constant__ int ydim4_update_halo_kernel1_r2; int ydim4_update_halo_kernel1_r2_h = -1; __constant__ int xdim5_update_halo_kernel1_r2; int xdim5_update_halo_kernel1_r2_h = -1; __constant__ int ydim5_update_halo_kernel1_r2; int ydim5_update_halo_kernel1_r2_h = -1; __constant__ int xdim6_update_halo_kernel1_r2; int xdim6_update_halo_kernel1_r2_h = -1; __constant__ int ydim6_update_halo_kernel1_r2; int ydim6_update_halo_kernel1_r2_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_r2*(y)+xdim0_update_halo_kernel1_r2*ydim0_update_halo_kernel1_r2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_r2*(y)+xdim1_update_halo_kernel1_r2*ydim1_update_halo_kernel1_r2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_r2*(y)+xdim2_update_halo_kernel1_r2*ydim2_update_halo_kernel1_r2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_r2*(y)+xdim3_update_halo_kernel1_r2*ydim3_update_halo_kernel1_r2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_r2*(y)+xdim4_update_halo_kernel1_r2*ydim4_update_halo_kernel1_r2*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_r2*(y)+xdim5_update_halo_kernel1_r2*ydim5_update_halo_kernel1_r2*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_r2*(y)+xdim6_update_halo_kernel1_r2*ydim6_update_halo_kernel1_r2*(z)) //user function __device__ inline void update_halo_kernel1_r2(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed , const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(-3,0,0)]; if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(-3,0,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(-3,0,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(-3,0,0)]; if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(-3,0,0)]; if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(-3,0,0)]; if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(-3,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_r2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * 
xdim0_update_halo_kernel1_r2 + idx_z * 1 * xdim0_update_halo_kernel1_r2 * ydim0_update_halo_kernel1_r2; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel1_r2 + idx_z * 1 * xdim1_update_halo_kernel1_r2 * ydim1_update_halo_kernel1_r2; arg2 += idx_x * 1 + idx_y * 1 * xdim2_update_halo_kernel1_r2 + idx_z * 1 * xdim2_update_halo_kernel1_r2 * ydim2_update_halo_kernel1_r2; arg3 += idx_x * 1 + idx_y * 1 * xdim3_update_halo_kernel1_r2 + idx_z * 1 * xdim3_update_halo_kernel1_r2 * ydim3_update_halo_kernel1_r2; arg4 += idx_x * 1 + idx_y * 1 * xdim4_update_halo_kernel1_r2 + idx_z * 1 * xdim4_update_halo_kernel1_r2 * ydim4_update_halo_kernel1_r2; arg5 += idx_x * 1 + idx_y * 1 * xdim5_update_halo_kernel1_r2 + idx_z * 1 * xdim5_update_halo_kernel1_r2 * ydim5_update_halo_kernel1_r2; arg6 += idx_x * 1 + idx_y * 1 * xdim6_update_halo_kernel1_r2 + idx_z * 1 * xdim6_update_halo_kernel1_r2 * ydim6_update_halo_kernel1_r2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_r2(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_update_halo_kernel1_r2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; ops_timing_realloc(47,"update_halo_kernel1_r2"); OPS_kernels[47].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel1_r2_h || ydim0 != ydim0_update_halo_kernel1_r2_h || xdim1 != xdim1_update_halo_kernel1_r2_h || ydim1 != ydim1_update_halo_kernel1_r2_h || xdim2 != xdim2_update_halo_kernel1_r2_h || ydim2 != ydim2_update_halo_kernel1_r2_h || xdim3 != xdim3_update_halo_kernel1_r2_h || ydim3 != ydim3_update_halo_kernel1_r2_h || xdim4 != xdim4_update_halo_kernel1_r2_h || ydim4 != ydim4_update_halo_kernel1_r2_h || xdim5 != xdim5_update_halo_kernel1_r2_h || ydim5 != ydim5_update_halo_kernel1_r2_h || xdim6 != 
xdim6_update_halo_kernel1_r2_h || ydim6 != ydim6_update_halo_kernel1_r2_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel1_r2, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_r2_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel1_r2, &ydim0, sizeof(int) ); ydim0_update_halo_kernel1_r2_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel1_r2, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_r2_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel1_r2, &ydim1, sizeof(int) ); ydim1_update_halo_kernel1_r2_h = ydim1; cudaMemcpyToSymbol( xdim2_update_halo_kernel1_r2, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_r2_h = xdim2; cudaMemcpyToSymbol( ydim2_update_halo_kernel1_r2, &ydim2, sizeof(int) ); ydim2_update_halo_kernel1_r2_h = ydim2; cudaMemcpyToSymbol( xdim3_update_halo_kernel1_r2, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_r2_h = xdim3; cudaMemcpyToSymbol( ydim3_update_halo_kernel1_r2, &ydim3, sizeof(int) ); ydim3_update_halo_kernel1_r2_h = ydim3; cudaMemcpyToSymbol( xdim4_update_halo_kernel1_r2, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_r2_h = xdim4; cudaMemcpyToSymbol( ydim4_update_halo_kernel1_r2, &ydim4, sizeof(int) ); ydim4_update_halo_kernel1_r2_h = ydim4; cudaMemcpyToSymbol( xdim5_update_halo_kernel1_r2, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_r2_h = xdim5; cudaMemcpyToSymbol( ydim5_update_halo_kernel1_r2, &ydim5, sizeof(int) ); ydim5_update_halo_kernel1_r2_h = ydim5; cudaMemcpyToSymbol( xdim6_update_halo_kernel1_r2, &xdim6, sizeof(int) ); xdim6_update_halo_kernel1_r2_h = xdim6; cudaMemcpyToSymbol( ydim6_update_halo_kernel1_r2, &ydim6, sizeof(int) ); ydim6_update_halo_kernel1_r2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[8]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ 
dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); ops_timers_core(&c1,&t1); OPS_kernels[47].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel1_r2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], 
(double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[47].time += t2-t1; ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); //Update kernel record OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[47].transfer += ops_compute_transfer(dim, range, &arg6); }
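// ---------------------------------------------------------------------------
// Note added for readability (not part of the generated file): the OPS_ACCn
// macros above flatten a 3D (x, y, z) offset into a 1D index as
//   x + xdim*(y) + xdim*ydim*(z),
// and the per-thread pointer adjustment in the kernel applies the same
// formula with (idx_x, idx_y, idx_z). Worked example with hypothetical pad
// sizes xdim = 10, ydim = 8: the element at (x, y, z) = (2, 3, 1) lands at
// offset 2 + 10*3 + 10*8*1 = 112. The kernel body then reads OPS_ACC(-3,0,0),
// i.e. each halo cell takes its value from the point three cells inward in x.
// ---------------------------------------------------------------------------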
0f7bb1e803101098180175be5f7fd0ff166841c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (C) 2015 Davis E. King ([email protected]) // License: Boost Software License See LICENSE.txt for the full license. #include "cuda_utils.h" #include "cuda_dlib.h" #include "cudnn_dlibapi.h" #include <math_constants.h> namespace dlib { namespace cuda { // ----------------------------------------------------------------------------------- void set_device ( int dev ) { CHECK_CUDA(hipSetDevice(dev)); } int get_device ( ) { int dev = 0; CHECK_CUDA(hipGetDevice(&dev)); return dev; } std::string get_device_name ( int device ) { hipDeviceProp_t props; CHECK_CUDA(hipGetDeviceProperties(&props, device)); return props.name; } void set_current_device_blocking_sync( ) { CHECK_CUDA(hipSetDeviceFlags(hipDeviceScheduleBlockingSync)); } int get_num_devices ( ) { int num_devices; CHECK_CUDA(hipGetDeviceCount(&num_devices)); return num_devices; } bool can_access_peer (int device_id, int peer_device_id) { int can_access; CHECK_CUDA(hipDeviceCanAccessPeer(&can_access, device_id, peer_device_id)); return can_access != 0; } bool can_access_peer (const tensor& device, const tensor& peer_device) { return can_access_peer(device.device_id(), peer_device.device_id()); } void device_synchronize (int dev) { raii_set_device set_dev(dev); CHECK_CUDA(hipDeviceSynchronize()); } void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); } enable_peer_access:: enable_peer_access( int device_id, int peer_device_id ) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id) { raii_set_device set_dev(device_id); auto err = hipDeviceEnablePeerAccess(peer_device_id, 0); if (err == hipSuccess) { call_disable = true; } else if (err == hipErrorPeerAccessAlreadyEnabled) { // call hipGetLastError() to dispose of this error since we don't // care. auto err2 = hipGetLastError(); if (err2 != hipErrorPeerAccessAlreadyEnabled) CHECK_CUDA(err2); } else { CHECK_CUDA(err); } } enable_peer_access:: ~enable_peer_access() noexcept(false) { if (call_disable) { raii_set_device set_dev(device_id); CHECK_CUDA(hipDeviceDisablePeerAccess(peer_device_id)); } } // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- __global__ void _cuda_inverse_norms(float* invnorms, const float* data, size_t nr, size_t nc, const float eps) { // initialize invnorms before we begin. 
for (auto i : grid_stride_range_y(0, nr)) for (auto j : grid_stride_range(0, 1)) invnorms[i] = eps; __syncthreads(); for (auto i : grid_stride_range_y(0, nr)) { auto p = data + i*nc; float temp = 0; for (auto j : grid_stride_range(0, nc)) temp += p[j]*p[j]; // and store the sum into invnorms[i] warp_reduce_atomic_add(invnorms[i], temp); } __syncthreads(); for (auto i : grid_stride_range_y(0, nr)) for (auto j : grid_stride_range(0, 1)) invnorms[i] = 1.0/std::sqrt(invnorms[i]); } void inverse_norms ( resizable_tensor& invnorms, const tensor& data, const double eps ) { invnorms.set_size(data.num_samples()); launch_kernel(_cuda_inverse_norms, max_jobs(data.size()/data.num_samples(), data.num_samples()), invnorms.device(), data.device(), data.num_samples(), data.size()/data.num_samples(), eps); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_dot_prods(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc) { // initialize out before we begin. for (auto i : grid_stride_range_y(0, nr)) for (auto j : grid_stride_range(0, 1)) out[i] = 0; __syncthreads(); for (auto i : grid_stride_range_y(0, nr)) { auto l = lhs + i*nc; auto r = rhs + i*nc; float temp = 0; for (auto j : grid_stride_range(0, nc)) temp += l[j]*r[j]; // and store the sum into out[i] warp_reduce_atomic_add(out[i], temp); } } __global__ void _cuda_dot_prods_add_to(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc) { for (auto i : grid_stride_range_y(0, nr)) { auto l = lhs + i*nc; auto r = rhs + i*nc; float temp = 0; for (auto j : grid_stride_range(0, nc)) temp += l[j]*r[j]; // and store the sum into out[i] warp_reduce_atomic_add(out[i], temp); } } void dot_prods ( resizable_tensor& out, const tensor& lhs, const tensor& rhs ) { DLIB_CASSERT(have_same_dimensions(lhs,rhs)); out.set_size(lhs.num_samples()); if (out.size() == 0) return; const auto nr = lhs.num_samples(); const auto nc = lhs.size()/lhs.num_samples(); launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc); } void dot_prods ( bool add_to, tensor& out, const tensor& lhs, const tensor& rhs ) { DLIB_CASSERT(have_same_dimensions(lhs,rhs)); DLIB_CASSERT(out.k() == 1 && out.nr() == 1 && out.nc() == 1); DLIB_CASSERT(out.size() == lhs.num_samples()); const auto nr = lhs.num_samples(); const auto nc = lhs.size()/lhs.num_samples(); if (add_to) launch_kernel(_cuda_dot_prods_add_to, max_jobs(nc,nr), out.device(), lhs.device(), rhs.device(), nr, nc); else launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_scale_columns(float* out, const float* m, const float* v, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = m[j]*v[j%nc]; } } void scale_columns ( tensor& out, const tensor& m, const tensor& v ) { launch_kernel(_cuda_scale_columns, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_scale_rows(float* out, const float* m, const float* v, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = m[j]*v[j/nc]; } } void scale_rows ( tensor& out, const tensor& m, const tensor& v ) { launch_kernel(_cuda_scale_rows, max_jobs(m.size()), out.device(), m.device(), 
v.device(), m.num_samples(), m.size()/m.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_scale_rows2(float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc]; } } __global__ void _cuda_scale_rows2_beta(const float beta, float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = beta*out[j] + (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc]; } } void scale_rows2 ( float beta, tensor& out, const tensor& m1, const tensor& m2, const tensor& v1, const tensor& v2 ) { if (beta == 0) { launch_kernel(_cuda_scale_rows2, max_jobs(m1.size()), out.device(), m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(), m1.size()/m1.num_samples()); } else { launch_kernel(_cuda_scale_rows2_beta, max_jobs(m1.size()), beta, out.device(), m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(), m1.size()/m1.num_samples()); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_exp(float* dest, const float* src, size_t n) { for (auto i : grid_stride_range(0, n)) dest[i] = ::exp(src[i]); } void exp ( tensor& dest, const tensor& src ) { DLIB_ASSERT(dest.size() == src.size()); launch_kernel(_cuda_exp, max_jobs(src.size()), dest.device(), src.device(), src.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_log(float* dest, const float* src, size_t n) { for (auto i : grid_stride_range(0, n)) dest[i] = ::log(src[i]); } void log ( tensor& dest, const tensor& src ) { DLIB_ASSERT(dest.size() == src.size()); launch_kernel(_cuda_log, max_jobs(src.size()), dest.device(), src.device(), src.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_log10(float* dest, const float* src, size_t n) { for (auto i : grid_stride_range(0, n)) dest[i] = ::log10(src[i]); } void log10 ( tensor& dest, const tensor& src ) { DLIB_ASSERT(dest.size() == src.size()); launch_kernel(_cuda_log10, max_jobs(src.size()), dest.device(), src.device(), src.size()); } // ----------------------------------------------------------------------------------- __global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]*s2[i]; } } __global__ void _cuda_multiply2(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n, size_t max_size) { for (auto i : grid_stride_range(0, n)) { d[i] = 0; for (size_t j = i; j < max_size; j += n) d[i] += s1[j%s1_n]*s2[j%s2_n]; } } __global__ void _cuda_multiply3(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i%s1_n]*s2[i%s2_n]; } } __global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] += s1[i]*s2[i]; } } __global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n, size_t max_size) { for (auto i : grid_stride_range(0, n)) { for (size_t j = i; j < max_size; j += n) d[i] += s1[j%s1_n]*s2[j%s2_n]; } } __global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* 
s2, size_t n, size_t s1_n, size_t s2_n) { for (auto i : grid_stride_range(0, n)) { d[i] += s1[i%s1_n]*s2[i%s2_n]; } } void multiply ( bool add_to, tensor& dest, const tensor& src1, const tensor& src2 ) { DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() && dest.nr() == src1.nr() && src1.nr() == src2.nr() && dest.nc() == src1.nc() && src1.nc() == src2.nc() ); const long MD = ::max(::max(dest.num_samples(),src1.num_samples()),src2.num_samples()); DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) && (src1.num_samples()==1 || src1.num_samples()==MD) && (src2.num_samples()==1 || src2.num_samples()==MD) ); if (dest.size() == 0) return; const size_t max_size = ::max(::max(dest.size(),src1.size()),src2.size()); const auto d = dest.host(); const auto s1 = src1.host(); const auto s2 = src2.host(); if (dest.size() == src1.size() && src1.size() == src2.size()) { if (add_to) launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size()); else launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size()); } else if (dest.num_samples() == 1) { if (add_to) launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size(), max_size); else launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size(), max_size); } else { if (add_to) launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size()); else launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = s1[i]*s2[k]; } } __global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { // zero initialize d before we begin. 
for (auto i : grid_stride_range_y(0, ks)) for (auto j : grid_stride_range(0, 1)) d[i] = 0; __syncthreads(); // loop over all the image planes for (auto i : grid_stride_range_y(0, n)) { // sum all the elements in the i-th image plane float temp = 0; for (auto j : grid_stride_range(i*bs, (i+1)*bs)) temp += s1[j]*s2[j]; auto k = i%ks; // and store the sum into d[k] warp_reduce_atomic_add(d[k], temp); } } __global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] += s1[i]*s2[k]; } } __global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { // loop over all the image planes for (auto i : grid_stride_range_y(0, n)) { // sum all the elements in the i-th image plane float temp = 0; for (auto j : grid_stride_range(i*bs, (i+1)*bs)) temp += s1[j]*s2[j]; auto k = i%ks; // and store the sum into d[k] warp_reduce_atomic_add(d[k], temp); } } void multiply_conv ( bool add_to, tensor& dest, const tensor& src1, const tensor& src2 ) { if (have_same_dimensions(dest,src1)) { DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k()); if (dest.size() == 0) return; if (add_to) launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k()); else launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()), dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k()); } else { DLIB_CASSERT(have_same_dimensions(src1,src2)); DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k()); if (dest.size() == 0) return; const auto bs = src1.nr()*src1.nc(); const auto n = src1.num_samples()*src1.k(); if (add_to) launch_kernel(_cuda_multiply_conv2_add_to, max_jobs(bs,n), dest.device(), src1.device(), n, src2.device(), bs, src1.k()); else launch_kernel(_cuda_multiply_conv2, max_jobs(bs,n), dest.device(), src1.device(), n, src2.device(), bs, src1.k()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_scale_channels_add_to(float* d, const float* src, size_t n, const float* scales, size_t bs) { for (auto i : grid_stride_range(0, n)) { auto k = i/bs; d[i] += src[i]*scales[k]; } } __global__ void _cuda_scale_channels(float* d, const float* src, size_t n, const float* scales, size_t bs) { for (auto i : grid_stride_range(0, n)) { auto k = i/bs; d[i] = src[i]*scales[k]; } } void scale_channels ( bool add_to, tensor& dest, const tensor& src, const tensor& scales ) { DLIB_CASSERT(have_same_dimensions(dest,src) && scales.num_samples() == src.num_samples() && scales.k() == src.k() && scales.nr() == 1 && scales.nc() == 1 ); if (dest.size() == 0) return; if (add_to) launch_kernel(_cuda_scale_channels_add_to,max_jobs(dest.size()), dest.device(), src.device(), src.size(), scales.device(), src.nr()*src.nc()); else launch_kernel(_cuda_scale_channels,max_jobs(dest.size()), dest.device_write_only(), src.device(), src.size(), scales.device(), src.nr()*src.nc()); } // ------------------------------------------------------------------------------------ __global__ void _cuda_mult1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]*s2[i]; } } __global__ void _cuda_mult1_add_to(float* d, const float* s1, const float* s2, size_t n) { for (auto i : 
grid_stride_range(0, n)) { d[i] += s1[i]*s2[i]; } } __global__ void _cuda_mult2(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] = v1*v2; } } __global__ void _cuda_mult2_add_to(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] += v1*v2; } } void multiply_zero_padded ( bool add_to, tensor& dest, const tensor& src1, const tensor& src2 ) { if (dest.size() == 0) return; // Do the simple and fast version if everything has the same dimensions if (have_same_dimensions(dest, src1) && have_same_dimensions(dest, src2)) { if (add_to) launch_kernel(_cuda_mult1_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); else launch_kernel(_cuda_mult1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); } else { if (add_to) { // Otherwise, do the more complex version with bounds checking. launch_kernel(_cuda_mult2_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } else { // Otherwise, do the more complex version with bounds checking. launch_kernel(_cuda_mult2,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } } } // ------------------------------------------------------------------------------------ __global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]+s2[i]; } } __global__ void _cuda_add2(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] = v1+v2; } } void add ( tensor& dest, const tensor& src1, const tensor& src2 ) { if (dest.size() == 0) return; // Do the simple and fast version if everything has the same dimensions if (have_same_dimensions(dest, src1) && have_same_dimensions(dest, src2)) { launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); } else { // Otherwise, do the more complex version with bounds checking. 
launch_kernel(_cuda_add2,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i] + B; } } __global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i]; } } void affine_transform( tensor& dest, const tensor& src, const float A, const float B ) { DLIB_CASSERT(dest.size()==src.size()); if (B != 0) launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B); else launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A); } void affine_transform( tensor& dest, const tensor& src, const float A ) { DLIB_CASSERT(dest.size()==src.size()); launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_rect( float* d, const float* s1, const float* s2, const float* s3, float A, float B, float C, size_t start_idx, size_t n, size_t rect_nc, size_t total_nc ) { for (auto i : grid_stride_range(0, n)) { size_t r = i/rect_nc; size_t c = i%rect_nc; size_t idx = r*total_nc + c + start_idx; d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx]; } } void affine_transform( const rectangle& rect, tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, float A, float B, float C ) { DLIB_CASSERT(dest.size() == src1.size()); DLIB_CASSERT(dest.size() == src2.size()); DLIB_CASSERT(dest.size() == src3.size()); DLIB_CASSERT(dest.num_samples() == src1.num_samples()); DLIB_CASSERT(dest.num_samples() == src2.num_samples()); DLIB_CASSERT(dest.num_samples() == src3.num_samples()); DLIB_CASSERT(rectangle(0,0, dest.size()/dest.num_samples()-1, dest.num_samples()-1).contains(rect)); launch_kernel(_cuda_affine_transform_rect,max_jobs(rect.area()), dest.device(), src1.device(), src2.device(), src3.device(), A, B, C, rect.left() + rect.top()*(dest.size()/dest.num_samples()), rect.area(), rect.width(), dest.size()/dest.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C; } } __global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i]; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const float A, const float B, const float C ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); if (C != 0) launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C); else launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B); } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const float A, const 
float B ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale) { for (auto i : grid_stride_range(0, n)) { d[i] += scale*s[i]; } } void add_scaled( tensor& dest, const float scale, const tensor& src ) { DLIB_CASSERT(dest.size()==src.size()); launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_add_cv_to_all_columns(float beta, float* dest, float alpha, const float* src, size_t size, size_t stride) { for (auto i : grid_stride_range(0, size)) { dest[i] = beta*dest[i] + alpha*src[i/stride]; } } __global__ void _cuda_add_cv_to_all_columns_no_beta(float* dest, float alpha, const float* src, size_t size, size_t stride) { for (auto i : grid_stride_range(0, size)) { dest[i] = alpha*src[i/stride]; } } void add_cv_to_all_columns( float beta, tensor& dest, float alpha, const tensor& src ) { DLIB_CASSERT(dest.num_samples() == src.num_samples() && src.num_samples() == src.size()); if (beta == 0) launch_kernel(_cuda_add_cv_to_all_columns_no_beta, max_jobs(dest.size()), dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples()); else launch_kernel(_cuda_add_cv_to_all_columns, max_jobs(dest.size()), beta, dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform5( float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D ) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, const float A, const float B, const float C, const float D ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); DLIB_CASSERT(dest.size()==src3.size()); launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src3.device(), dest.size(), A, B, C, D); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_range( float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C ) { for (auto i : grid_stride_range(begin, end)) { d[i] = A*s1[i] + B*s2[i] + C*s3[i]; } } void affine_transform_range( size_t begin, size_t end, tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, const float A, const float B, const float C ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); DLIB_CASSERT(dest.size()==src3.size()); DLIB_CASSERT(begin <= end && end <= dest.size()); launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin), dest.device(), src1.device(), src2.device(), src3.device(), begin, end, A, B, C); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B) { for (auto i : grid_stride_range(0, n)) { d[i] = 
A[i]*s[i] + B[i]; } } __global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i%bs]*s[i] + B[i%bs]; } } void affine_transform( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src)); DLIB_CASSERT( ((A.num_samples()==1 && B.num_samples()==1) || (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples()))); DLIB_CASSERT( A.nr()==B.nr() && B.nr()==src.nr() && A.nc()==B.nc() && B.nc()==src.nc() && A.k() ==B.k() && B.k()==src.k(), "\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr() <<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc() <<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k() ); if (A.num_samples() == 1) { launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size()); } else { launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device()); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_compute_adam_update( size_t begin, size_t end, float* s, float* m, float* v, const float alpha, const float weight_decay, const float momentum1, const float momentum2, const float* params, const float* params_grad ) { const float eps = 1e-8; // The loop is equivalent to doing this: // m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad); // v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad); // s = -alpha*m/(sqrt(v) + eps); for (auto i : grid_stride_range(begin, end)) { float g = (weight_decay*params[i] + params_grad[i]); m[i] = momentum1*m[i] + (1-momentum1)*g; v[i] = momentum2*v[i] + (1-momentum2)*g*g; s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps); } } void compute_adam_update ( size_t begin, size_t end, tensor& s, tensor& m, tensor& v, const float t, const float learning_rate, const float weight_decay, const float momentum1, const float momentum2, const tensor& params, const tensor& params_grad ) { DLIB_CASSERT(s.size() == m.size() && s.size() == v.size() && s.size() == params.size() && s.size() == params_grad.size()); DLIB_CASSERT(begin <= end && end <= params.size()); const float alpha = learning_rate*std::sqrt(1-::pow(momentum2,t))/(1-::pow(momentum1, t)); launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin), begin, end, s.device(), m.device(), v.device(), alpha, weight_decay, momentum1, momentum2, params.device(), params_grad.device()); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = A[k]*s[i] + B[k]; } } void affine_transform_conv( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src)); DLIB_CASSERT(have_same_dimensions(A, B)); DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k()); launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()), dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k()); } // ----------------------------------------------------------------------------------- __global__ void 
_add_bias_gradient(float* out, const float* in, size_t n, size_t total_n) { for (auto i : grid_stride_range(0, n)) { out[i] = in[i]; for (size_t j = i+n; j < total_n; j+=n) out[i] += in[j]; } } void assign_bias_gradient ( tensor& grad, const tensor& gradient_input ) { DLIB_CASSERT( grad.num_samples() == 1 && gradient_input.k() == grad.k() && gradient_input.nr() == grad.nr() && gradient_input.nc() == grad.nc() && gradient_input.size() > 0); launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size()); } // ---------------------------------------------------------------------------------------- __global__ void _set_tensor(float* out, size_t n, const float val) { for (auto i : grid_stride_range(0, n)) out[i] = val; } void set_tensor ( tensor& t, float value ) { launch_kernel(_set_tensor, max_jobs(t.size()), t.device(), t.size(), value); } // ---------------------------------------------------------------------------------------- __global__ void _scale_tensor(float* out, size_t n, const float val) { for (auto i : grid_stride_range(0, n)) out[i] *= val; } void scale_tensor ( tensor& t, float value ) { launch_kernel(_scale_tensor, max_jobs(t.size()), t.device(), t.size(), value); } // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- __global__ void _cuda_threshold(float* d, size_t n, float thresh) { for (auto i : grid_stride_range(0, n)) { d[i] = d[i]>thresh ? 1:0; } } void threshold ( tensor& data, float thresh ) { launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh); } // ------------------------------------------------------------------------------------ __global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result) { // Parallel sum everything into local temp variables. float temp = 0; for(auto i : grid_stride_range(0, n)) temp += a[i]*b[i]; // Then do the warp reduce add thing to merge into one output value. warp_reduce_atomic_add(*result, temp); } void dot ( const tensor& a, const tensor& b, tensor& result, size_t idx ) { DLIB_CASSERT(a.size() == b.size()); DLIB_CASSERT(idx < result.size()); launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp) { const float p = *pp; for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = p*s[i]; } } void prelu ( tensor& dest, const tensor& src, const tensor& param ) { launch_kernel(_cuda_prelu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), param.device()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad) { const float p = *pp; float pgrad = 0; for(auto i : grid_stride_range(0, n)) { if (s[i] > 0) { out[i] += gi[i]; } else { out[i] += p*gi[i]; pgrad += gi[i]*s[i]; } } // Then do the warp reduce add thing to merge into one output value. 
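            // pgrad holds this thread's partial derivative of the loss with respect to
            // the prelu parameter: since d/dp of p*s is s, only the negative inputs
            // contribute, each as gi[i]*s[i].  warp_reduce_atomic_add() folds the
            // per-thread partials into *ppgrad (a warp-wide shuffle reduction followed
            // by one atomicAdd per warp).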
warp_reduce_atomic_add(*ppgrad, pgrad); } void prelu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input, const tensor& param, tensor& params_grad ) { params_grad = 0; launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()), grad.device(), src.device(), gradient_input.device(), grad.size(), param.device(), params_grad.device()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_leaky_relu(const float* s, float* d, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = alpha * s[i]; } } void leaky_relu( tensor& dest, const tensor &src, const float alpha ) { launch_kernel(_cuda_leaky_relu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_leaky_relu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] = gi[i]; else out[i] = alpha * gi[i]; } } __global__ void _cuda_leaky_relu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] += gi[i]; else out[i] += alpha * gi[i]; } } void leaky_relu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input, const float alpha ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) { launch_kernel(_cuda_leaky_relu_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size(), alpha); } else { launch_kernel(_cuda_leaky_relu_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size(), alpha); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_mish(const float* s, float* d, size_t n) { for (auto i : grid_stride_range(0, n)) { const auto e = ::exp(s[i]); const auto delta = 2*e + e*e + 2; d[i] = s[i] - 2*s[i]/delta; } } void mish ( tensor& dest, const tensor& src ) { launch_kernel(_cuda_mish, max_jobs(dest.size()), src.device(), dest.device(), src.size()); } // ---------------------------------------------------------------------------------------- __device__ float mish_compute_gradient(float x) { if (x >= 8) return 1.f; if (x <= -8) return 0.f; const auto e = ::exp(x); const auto delta = 2*e + e*e + 2; const auto omega = 4*(x + 1) + 4*e*e + e*e*e + e*(4*x + 6); return e*omega/(delta*delta); } __global__ void _cuda_mish_gradient_inplace(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] = gi[i]*mish_compute_gradient(s[i]); } __global__ void _cuda_mish_gradient(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] += gi[i]*mish_compute_gradient(s[i]); } void mish_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_mish_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size()); else launch_kernel(_cuda_mish_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_clipped_relu(const float* s, float* d, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] < 0) 
d[i] = 0; else if (s[i] > alpha) d[i] = alpha; else d[i] = s[i]; } } void clipped_relu ( tensor& dest, const tensor &src, const float alpha ) { launch_kernel(_cuda_clipped_relu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_clipped_relu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0 && s[i] < alpha) out[i] = gi[i]; else out[i] = 0.f; } } __global__ void _cuda_clipped_relu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0 && s[i] < alpha) out[i] += gi[i]; } } void clipped_relu_gradient ( tensor& grad, const tensor& dest, const tensor& gradient_input, const float alpha ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_clipped_relu_gradient_inplace, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); else launch_kernel(_cuda_clipped_relu_gradient, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_elu(const float* s, float* d, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = alpha * (::exp(s[i]) - 1.0f); } } void elu ( tensor& dest, const tensor &src, const float alpha ) { launch_kernel(_cuda_elu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_elu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] = gi[i]; else out[i] = (alpha + s[i]) * gi[i]; } } __global__ void _cuda_elu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] += gi[i]; else out[i] += (alpha + s[i]) * gi[i]; } } void elu_gradient ( tensor& grad, const tensor& dest, const tensor& gradient_input, const float alpha ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_elu_gradient_inplace, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); else launch_kernel(_cuda_elu_gradient, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_gelu(const float* s, float* d, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s[i] * normcdf(s[i]); } } void gelu ( tensor& dest, const tensor& src ) { launch_kernel(_cuda_gelu, max_jobs(dest.size()), src.device(), dest.device(), src.size()); } // ---------------------------------------------------------------------------------------- __device__ float gelu_compute_gradient(float x) { const float beta = 1.0f / CUDART_SQRT_2PI; const float cdf = normcdf(x); const float pdf = beta*::exp(-0.5f*x*x); return cdf + x * pdf; } __global__ void _cuda_gelu_gradient_inplace(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] = gi[i]*gelu_compute_gradient(s[i]); } __global__ void _cuda_gelu_gradient(float* out, const float* s, const 
float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] += gi[i]*gelu_compute_gradient(s[i]); } void gelu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_gelu_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size()); else launch_kernel(_cuda_gelu_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_resize_bilinear(size_t dsize, size_t dchan_size, size_t dnc, float* d, size_t schan_size, int snr, int snc, const float* s, const float x_scale, const float y_scale) { for(auto i : grid_stride_range(0, dsize)) { const int idx = i%dchan_size; const int channel = i/dchan_size; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; float tl = s[sidx+top*snc+left]; float tr = s[sidx+top*snc+right]; float bl = s[sidx+bottom*snc+left]; float br = s[sidx+bottom*snc+right]; float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); d[i] = temp; } } __global__ void _cuda_resize_bilinear_strided(size_t dsize, size_t dchan_size, size_t dnc, float* d, size_t schan_size, int snr, int snc, const float* s, const float x_scale, const float y_scale, size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided ) { for(auto i : grid_stride_range(0, dsize)) { const int idx = i%dchan_size; const int channel = i/dchan_size; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const int didx = channel*dest_chan_size_strided + r*dest_row_stride+c; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; float tl = s[sidx+top*src_row_stride+left]; float tr = s[sidx+top*src_row_stride+right]; float bl = s[sidx+bottom*src_row_stride+left]; float br = s[sidx+bottom*src_row_stride+right]; float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); d[didx] = temp; } } void resize_bilinear ( tensor& dest, long long dest_row_stride, long long dest_channel_stride, const tensor& src, long long src_row_stride, long long src_channel_stride ) { DLIB_CASSERT(is_same_object(dest, src)==false); DLIB_CASSERT(dest.num_samples() == src.num_samples()); DLIB_CASSERT(dest.k() == src.k()); if (dest.size() == 0 || src.size() == 0) return; const float x_scale = (src.nc()-1)/(float)std::max<long>((dest.nc()-1),1); const float y_scale = (src.nr()-1)/(float)std::max<long>((dest.nr()-1),1); if (dest.nc() == dest_row_stride && dest.nr()*dest.nc()==dest_channel_stride && src.nc() == src_row_stride && src.nr()*src.nc()==src_channel_stride) { launch_kernel(_cuda_resize_bilinear, dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(), src.nr()*src.nc(), src.nr(), src.nc(), src.device(), x_scale, y_scale); } else { launch_kernel(_cuda_resize_bilinear_strided, dest.size(), dest.nr()*dest.nc(), 
dest.nc(), dest.device(), src_channel_stride, src.nr(), src.nc(), src.device(), x_scale, y_scale, dest_row_stride, src_row_stride, dest_channel_stride); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_resize_bilinear_gradient(size_t dsize, size_t dchan_size, size_t dnc, const float* d, size_t schan_size, int snr, int snc, float* s, const float x_scale, const float y_scale) { for(auto i : grid_stride_range(0, dsize)) { const float tmp = d[i]; const int idx = i%dchan_size; const int channel = i/dchan_size; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; atomicAdd(s+sidx+top*snc+left, tmp*(1-tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+top*snc+right, tmp*(1-tb_frac)*(lr_frac)); atomicAdd(s+sidx+bottom*snc+left, tmp*(tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+bottom*snc+right, tmp*(tb_frac)*(lr_frac)); } } __global__ void _cuda_resize_bilinear_gradient_strided(size_t dsize, size_t dchan_size, size_t dnc, const float* d, size_t schan_size, int snr, int snc, float* s, const float x_scale, const float y_scale, size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided ) { for(auto i : grid_stride_range(0, dsize)) { const int idx = i%dchan_size; const int channel = i/dchan_size; const int didx = channel*dest_chan_size_strided; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const float tmp = d[didx + r*dest_row_stride+c]; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; atomicAdd(s+sidx+top*src_row_stride+left, tmp*(1-tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+top*src_row_stride+right, tmp*(1-tb_frac)*(lr_frac)); atomicAdd(s+sidx+bottom*src_row_stride+left, tmp*(tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+bottom*src_row_stride+right, tmp*(tb_frac)*(lr_frac)); } } void resize_bilinear_gradient ( tensor& grad, long long grad_row_stride, long long grad_channel_stride, const tensor& gradient_input, long long gradient_input_row_stride, long long gradient_input_channel_stride ) { DLIB_CASSERT(is_same_object(grad, gradient_input)==false); DLIB_CASSERT(gradient_input.num_samples() == grad.num_samples()); DLIB_CASSERT(gradient_input.k() == grad.k()); if (grad.size() == 0 || gradient_input.size() == 0) return; const float x_scale = (grad.nc()-1)/(float)std::max<long>((gradient_input.nc()-1),1); const float y_scale = (grad.nr()-1)/(float)std::max<long>((gradient_input.nr()-1),1); if (grad.nc() == grad_row_stride && grad.nr()*grad.nc()==grad_channel_stride && gradient_input.nc() == gradient_input_row_stride && gradient_input.nr()*gradient_input.nc()==gradient_input_channel_stride) { launch_kernel(_cuda_resize_bilinear_gradient, gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(), grad.nr()*grad.nc(), grad.nr(), grad.nc(), grad.device(), x_scale, y_scale); } else { launch_kernel(_cuda_resize_bilinear_gradient_strided, gradient_input.size(), gradient_input.nr()*gradient_input.nc(), 
gradient_input.nc(), gradient_input.device(), grad_channel_stride, grad.nr(), grad.nc(), grad.device(), x_scale, y_scale, gradient_input_row_stride, grad_row_stride, gradient_input_channel_stride); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_layer_normalize(float* out, const float* s, float* m, float* v, const float* g, const float* b, float eps, size_t ns, size_t num) { // compute means and sum of squares for (auto n : grid_stride_range_y(0, ns)) { auto p = s + n * num; float means = 0; float invstds = 0; for (auto i : grid_stride_range(0, num)) { means += p[i]; invstds += p[i] * p[i]; } warp_reduce_atomic_add(m[n], means/num); warp_reduce_atomic_add(v[n], invstds/num); } __syncthreads(); // compute variances for (auto n : grid_stride_range_y(0, ns)) { for (auto i : grid_stride_range(0, 1)) { auto var = v[n] - m[n] * m[n]; v[n] = 1.0f / std::sqrt(var + eps); } } __syncthreads(); for (auto n : grid_stride_range_y(0, ns)) { for (auto i : grid_stride_range(0, num)) { const float val = (s[n*num+i]-m[n])*v[n]; out[n*num+i] = val*g[i]+b[i]; } } } __global__ void _cuda_layer_normalize_gradient(float* out, float* gg, float* bg, const float* s, const float* gi, const float* m, const float* v, const float* g, float* dm, float* dv, float eps, size_t ns, size_t num) { for (auto n : grid_stride_range_y(0, ns)) { float temp_dv = 0; for (auto i : grid_stride_range(0, num)) { auto idx = n*num+i; const float x_hat = (s[idx] - m[n])*v[n]; bg[i] += gi[idx]; gg[i] += gi[idx]*x_hat; const float dx = gi[idx] * g[n]; temp_dv += dx*(s[idx] - m[n])*-0.5*v[n]*v[n]*v[n]; } warp_reduce_atomic_add(dv[n], temp_dv); } __syncthreads(); for (auto n : grid_stride_range_y(0, ns)) { float temp_dm = 0; for (auto i : grid_stride_range(0, num)) { auto idx = n*num+i; const float dx = gi[idx]*g[i]; temp_dm += dx*-v[n] + dv[n] * -2*(s[idx] - m[n])/num; } warp_reduce_atomic_add(dm[n], temp_dm); } __syncthreads(); for (auto n : grid_stride_range_y(0, ns)) { for (auto i : grid_stride_range(0, num)) { auto idx = n*num+i; const float dx = gi[idx]*g[i]; out[idx] += dx*v[n] + dv[n] * 2*(s[idx] - m[n])/num + dm[n]/num; } } } void layer_normalize ( const double eps, resizable_tensor& dest, resizable_tensor& means, resizable_tensor& invstds, const tensor& src, const tensor& gamma, const tensor& beta ) { const long num = src.k() * src.nr() * src.nc(); DLIB_CASSERT( have_same_dimensions(gamma, beta) && src.k() == gamma.k() && src.nr() == gamma.nr() && src.nc() == gamma.nc() && eps > 0, "\ngamma.k(): " << gamma.k() << "\ngamma.nr(): " << gamma.nr() << "\ngamma.nc(): " << gamma.nc() << "\nbeta.k(): " << beta.k() << "\nbeta.nr(): " << beta.nr() << "\nbeta.nc(): " << beta.nc() << "\nsrc.k(): " << src.k() << "\nsrc.nr(): " << src.nr() << "\nsrc.nc(): " << src.nc() << "\neps: " << eps ); dest.copy_size(src); means.set_size(src.num_samples()); invstds.set_size(src.num_samples()); means = 0; invstds = 0; launch_kernel(_cuda_layer_normalize, max_jobs(num, src.num_samples()), dest.device(), src.device(), means.device(), invstds.device(), gamma.device(), beta.device(), eps, src.num_samples(), num); } void layer_normalize_gradient ( const double eps, const tensor& gradient_input, const tensor& means, const tensor& invstds, const tensor& src, const tensor& gamma, tensor& src_grad, tensor& gamma_grad, tensor& beta_grad ) { const long num = src.k() * src.nr() * src.nc(); DLIB_CASSERT(src.num_samples() == means.size()); DLIB_CASSERT(src.num_samples() == invstds.size()); 
DLIB_CASSERT(src.k() == gamma.k()); DLIB_CASSERT(src.nr() == gamma.nr()); DLIB_CASSERT(src.nc() == gamma.nc()); DLIB_CASSERT(have_same_dimensions(gradient_input, src)); DLIB_CASSERT(have_same_dimensions(gradient_input, src_grad)); DLIB_CASSERT(have_same_dimensions(gamma_grad, gamma)); DLIB_CASSERT(have_same_dimensions(gamma_grad, beta_grad)); DLIB_CASSERT(eps > 0); beta_grad = 0; gamma_grad = 0; resizable_tensor dvars, dmeans; dvars.copy_size(invstds); dmeans.copy_size(means); dvars = 0; dmeans = 0; launch_kernel(_cuda_layer_normalize_gradient, max_jobs(num, src.num_samples()), src_grad.device(), gamma_grad.device(), beta_grad.device(), src.device(), gradient_input.device(), means.device(), invstds.device(), gamma.device(), dmeans.device(), dvars.device(), eps, src.num_samples(), num); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_copy_tensor_add_to (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size) { for(auto i : grid_stride_range(0, size)) { size_t blk = i/block_size; size_t j = i%block_size; dest[blk*dest_stride + j] += src[blk*src_stride + j]; } } __global__ void _cuda_copy_tensor (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size) { for(auto i : grid_stride_range(0, size)) { size_t blk = i/block_size; size_t j = i%block_size; dest[blk*dest_stride + j] = src[blk*src_stride + j]; } } void copy_tensor( bool add_to, tensor& dest, size_t dest_k_offset, const tensor& src, size_t src_k_offset, size_t count_k ) { const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k()); const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k()); const size_t block_size = count_k * dest.nc() * dest.nr(); DLIB_CASSERT(dest.num_samples() == src.num_samples() && dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size"); DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor"); DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor"); float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr(); const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();; if (add_to) { launch_kernel(_cuda_copy_tensor_add_to, max_jobs(dest.size()), dest_p, block_size*dest.num_samples(), src_p, dest_sample_size, src_sample_size, block_size); } else { launch_kernel(_cuda_copy_tensor, max_jobs(dest.size()), dest_p, block_size*dest.num_samples(), src_p, dest_sample_size, src_sample_size, block_size); } } // ---------------------------------------------------------------------------------------- __device__ float cuda_log1pexp(float x) { if (x <= -18) return ::exp(x); else if (-18 < x && x <= 9) return std::log1pf(::exp(x)); else if (9 < x && x <= 16) return x + expf(-x); else return x; } __global__ void _cuda_compute_loss_binary_log_per_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale) { float loss = 0; for(auto i : grid_stride_range(0, n)) { const float y = truth[i]; if (y > 0.f) { const float temp = cuda_log1pexp(-out_data[i]); loss += y*temp; g[i] = y*scale*(g[i]-1); } else if (y < 0.f) { const float temp = -(-out_data[i]-cuda_log1pexp(-out_data[i])); loss += -y*temp; g[i] = -y*scale*g[i]; } else { g[i] = 0.f; } } warp_reduce_atomic_add(*loss_out, loss); } // ---------------------------------------------------------------------------------------- 
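        // cuda_safe_log() below clamps its argument to at least epsilon (1e-10 by
        // default) so the per-pixel loss kernels that follow never take log(0):
        // e.g. cuda_safe_log(0.f) returns log(1e-10), roughly -23.03, instead of -inf.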
__device__ float cuda_safe_log(float x, float epsilon = 1e-10) { // Prevent trying to calculate the logarithm of a very small number (let alone zero) if (x >= epsilon) return ::log(x); else return ::log(epsilon); } __global__ void _cuda_compute_loss_multiclass_log_per_pixel(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, uint16_t label_to_ignore, const float scale) { float loss = 0; for(auto i : grid_stride_range(0, n)) { const size_t k = (i/plane_size)%nk; const size_t idx = (i%plane_size) + plane_size*(i/sample_size); const size_t y = truth[idx]; if (k == y) { loss -= cuda_safe_log(g[i]); g[i] = scale*(g[i] - 1); } else if (y == label_to_ignore) { g[i] = 0.f; } else { g[i] = scale*g[i]; } } warp_reduce_atomic_add(*loss_out, loss); } __global__ void _cuda_compute_loss_multiclass_log_per_pixel_weighted(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, const float* weights, const float scale) { float loss = 0; for(auto i : grid_stride_range(0, n)) { const size_t k = (i/plane_size)%nk; const size_t idx = (i%plane_size) + plane_size*(i/sample_size); const size_t y = truth[idx]; const float weight = weights[idx]; if (k == y) { loss -= weight*cuda_safe_log(g[i]); g[i] = weight*scale*(g[i] - 1); } else { g[i] = weight*scale*g[i]; } } warp_reduce_atomic_add(*loss_out, loss); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_compute_loss_mean_squared_per_channel_and_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale) { float loss = 0; for (auto i : grid_stride_range(0, n)) { const float y = truth[i]; const float temp = y - out_data[i]; loss += temp * temp; g[i] = -temp * scale; } warp_reduce_atomic_add(*loss_out, loss); } // ---------------------------------------------------------------------------------------- void compute_loss_binary_log_per_pixel:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const float> truth_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float))); sigmoid(gradient, subnetwork_output); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_binary_log_per_pixel, max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } void compute_loss_multiclass_log_per_pixel:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const uint16_t> truth_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float))); softmax(gradient, subnetwork_output); static const uint16_t label_to_ignore = std::numeric_limits<uint16_t>::max(); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. 
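            // softmax() above has already written the per-class probabilities into
            // gradient.  The kernel below converts them in place to scale*(p-1) for the
            // labelled class and scale*p otherwise (or 0 for ignored pixels), while
            // summing -log(p_true) into the work buffer.  For example, a mini-batch of
            // 4 samples with 100x100 outputs gives scale = 1/(4*100*100) = 2.5e-5.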
const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel, max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), label_to_ignore, scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } void compute_loss_multiclass_log_per_pixel_weighted:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const uint16_t> truth_buffer, cuda_data_ptr<const float> weights_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float))); softmax(gradient, subnetwork_output); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel_weighted, max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), weights_buffer.data(), scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } void compute_loss_mean_squared_per_channel_and_pixel:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const float> truth_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(hipMemset(loss_work_buffer, 0, sizeof(float))); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.k() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_mean_squared_per_channel_and_pixel , max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } // ---------------------------------------------------------------------------------------- } }
0f7bb1e803101098180175be5f7fd0ff166841c3.cu
// Copyright (C) 2015 Davis E. King ([email protected]) // License: Boost Software License See LICENSE.txt for the full license. #include "cuda_utils.h" #include "cuda_dlib.h" #include "cudnn_dlibapi.h" #include <math_constants.h> namespace dlib { namespace cuda { // ----------------------------------------------------------------------------------- void set_device ( int dev ) { CHECK_CUDA(cudaSetDevice(dev)); } int get_device ( ) { int dev = 0; CHECK_CUDA(cudaGetDevice(&dev)); return dev; } std::string get_device_name ( int device ) { cudaDeviceProp props; CHECK_CUDA(cudaGetDeviceProperties(&props, device)); return props.name; } void set_current_device_blocking_sync( ) { CHECK_CUDA(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)); } int get_num_devices ( ) { int num_devices; CHECK_CUDA(cudaGetDeviceCount(&num_devices)); return num_devices; } bool can_access_peer (int device_id, int peer_device_id) { int can_access; CHECK_CUDA(cudaDeviceCanAccessPeer(&can_access, device_id, peer_device_id)); return can_access != 0; } bool can_access_peer (const tensor& device, const tensor& peer_device) { return can_access_peer(device.device_id(), peer_device.device_id()); } void device_synchronize (int dev) { raii_set_device set_dev(dev); CHECK_CUDA(cudaDeviceSynchronize()); } void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); } enable_peer_access:: enable_peer_access( int device_id, int peer_device_id ) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id) { raii_set_device set_dev(device_id); auto err = cudaDeviceEnablePeerAccess(peer_device_id, 0); if (err == cudaSuccess) { call_disable = true; } else if (err == cudaErrorPeerAccessAlreadyEnabled) { // call cudaGetLastError() to dispose of this error since we don't // care. auto err2 = cudaGetLastError(); if (err2 != cudaErrorPeerAccessAlreadyEnabled) CHECK_CUDA(err2); } else { CHECK_CUDA(err); } } enable_peer_access:: ~enable_peer_access() noexcept(false) { if (call_disable) { raii_set_device set_dev(device_id); CHECK_CUDA(cudaDeviceDisablePeerAccess(peer_device_id)); } } // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- __global__ void _cuda_inverse_norms(float* invnorms, const float* data, size_t nr, size_t nc, const float eps) { // initialize invnorms before we begin. for (auto i : grid_stride_range_y(0, nr)) for (auto j : grid_stride_range(0, 1)) invnorms[i] = eps; __syncthreads(); for (auto i : grid_stride_range_y(0, nr)) { auto p = data + i*nc; float temp = 0; for (auto j : grid_stride_range(0, nc)) temp += p[j]*p[j]; // and store the sum into invnorms[i] warp_reduce_atomic_add(invnorms[i], temp); } __syncthreads(); for (auto i : grid_stride_range_y(0, nr)) for (auto j : grid_stride_range(0, 1)) invnorms[i] = 1.0/std::sqrt(invnorms[i]); } void inverse_norms ( resizable_tensor& invnorms, const tensor& data, const double eps ) { invnorms.set_size(data.num_samples()); launch_kernel(_cuda_inverse_norms, max_jobs(data.size()/data.num_samples(), data.num_samples()), invnorms.device(), data.device(), data.num_samples(), data.size()/data.num_samples(), eps); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_dot_prods(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc) { // initialize out before we begin. 
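            // Rows are spread over the launch's y dimension (grid_stride_range_y) and
            // the columns of each row over the x dimension (grid_stride_range).  The
            // inner grid_stride_range(0, 1) loop makes only the threads with x index 0
            // do the write, so out[i] is zeroed before the atomic accumulation below.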
for (auto i : grid_stride_range_y(0, nr)) for (auto j : grid_stride_range(0, 1)) out[i] = 0; __syncthreads(); for (auto i : grid_stride_range_y(0, nr)) { auto l = lhs + i*nc; auto r = rhs + i*nc; float temp = 0; for (auto j : grid_stride_range(0, nc)) temp += l[j]*r[j]; // and store the sum into out[i] warp_reduce_atomic_add(out[i], temp); } } __global__ void _cuda_dot_prods_add_to(float* out, const float* lhs, const float* rhs, size_t nr, size_t nc) { for (auto i : grid_stride_range_y(0, nr)) { auto l = lhs + i*nc; auto r = rhs + i*nc; float temp = 0; for (auto j : grid_stride_range(0, nc)) temp += l[j]*r[j]; // and store the sum into out[i] warp_reduce_atomic_add(out[i], temp); } } void dot_prods ( resizable_tensor& out, const tensor& lhs, const tensor& rhs ) { DLIB_CASSERT(have_same_dimensions(lhs,rhs)); out.set_size(lhs.num_samples()); if (out.size() == 0) return; const auto nr = lhs.num_samples(); const auto nc = lhs.size()/lhs.num_samples(); launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc); } void dot_prods ( bool add_to, tensor& out, const tensor& lhs, const tensor& rhs ) { DLIB_CASSERT(have_same_dimensions(lhs,rhs)); DLIB_CASSERT(out.k() == 1 && out.nr() == 1 && out.nc() == 1); DLIB_CASSERT(out.size() == lhs.num_samples()); const auto nr = lhs.num_samples(); const auto nc = lhs.size()/lhs.num_samples(); if (add_to) launch_kernel(_cuda_dot_prods_add_to, max_jobs(nc,nr), out.device(), lhs.device(), rhs.device(), nr, nc); else launch_kernel(_cuda_dot_prods, max_jobs(nc,nr), out.device_write_only(), lhs.device(), rhs.device(), nr, nc); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_scale_columns(float* out, const float* m, const float* v, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = m[j]*v[j%nc]; } } void scale_columns ( tensor& out, const tensor& m, const tensor& v ) { launch_kernel(_cuda_scale_columns, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_scale_rows(float* out, const float* m, const float* v, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = m[j]*v[j/nc]; } } void scale_rows ( tensor& out, const tensor& m, const tensor& v ) { launch_kernel(_cuda_scale_rows, max_jobs(m.size()), out.device(), m.device(), v.device(), m.num_samples(), m.size()/m.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_scale_rows2(float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc]; } } __global__ void _cuda_scale_rows2_beta(const float beta, float* out, const float* m1, const float* m2, const float* v1, const float* v2, size_t nr, size_t nc) { for (auto j : grid_stride_range(0, nr*nc)) { out[j] = beta*out[j] + (m1[j] - m2[j]*v1[j/nc]) * v2[j/nc]; } } void scale_rows2 ( float beta, tensor& out, const tensor& m1, const tensor& m2, const tensor& v1, const tensor& v2 ) { if (beta == 0) { launch_kernel(_cuda_scale_rows2, max_jobs(m1.size()), out.device(), m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(), m1.size()/m1.num_samples()); } else { launch_kernel(_cuda_scale_rows2_beta, max_jobs(m1.size()), beta, 
out.device(), m1.device(), m2.device(), v1.device(), v2.device(), m1.num_samples(), m1.size()/m1.num_samples()); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_exp(float* dest, const float* src, size_t n) { for (auto i : grid_stride_range(0, n)) dest[i] = ::exp(src[i]); } void exp ( tensor& dest, const tensor& src ) { DLIB_ASSERT(dest.size() == src.size()); launch_kernel(_cuda_exp, max_jobs(src.size()), dest.device(), src.device(), src.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_log(float* dest, const float* src, size_t n) { for (auto i : grid_stride_range(0, n)) dest[i] = ::log(src[i]); } void log ( tensor& dest, const tensor& src ) { DLIB_ASSERT(dest.size() == src.size()); launch_kernel(_cuda_log, max_jobs(src.size()), dest.device(), src.device(), src.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_log10(float* dest, const float* src, size_t n) { for (auto i : grid_stride_range(0, n)) dest[i] = ::log10(src[i]); } void log10 ( tensor& dest, const tensor& src ) { DLIB_ASSERT(dest.size() == src.size()); launch_kernel(_cuda_log10, max_jobs(src.size()), dest.device(), src.device(), src.size()); } // ----------------------------------------------------------------------------------- __global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]*s2[i]; } } __global__ void _cuda_multiply2(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n, size_t max_size) { for (auto i : grid_stride_range(0, n)) { d[i] = 0; for (size_t j = i; j < max_size; j += n) d[i] += s1[j%s1_n]*s2[j%s2_n]; } } __global__ void _cuda_multiply3(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i%s1_n]*s2[i%s2_n]; } } __global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] += s1[i]*s2[i]; } } __global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n, size_t max_size) { for (auto i : grid_stride_range(0, n)) { for (size_t j = i; j < max_size; j += n) d[i] += s1[j%s1_n]*s2[j%s2_n]; } } __global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n) { for (auto i : grid_stride_range(0, n)) { d[i] += s1[i%s1_n]*s2[i%s2_n]; } } void multiply ( bool add_to, tensor& dest, const tensor& src1, const tensor& src2 ) { DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() && dest.nr() == src1.nr() && src1.nr() == src2.nr() && dest.nc() == src1.nc() && src1.nc() == src2.nc() ); const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples()); DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) && (src1.num_samples()==1 || src1.num_samples()==MD) && (src2.num_samples()==1 || src2.num_samples()==MD) ); if (dest.size() == 0) return; const size_t max_size = std::max(std::max(dest.size(),src1.size()),src2.size()); const auto d = dest.host(); const auto s1 = src1.host(); const auto s2 = src2.host(); if (dest.size() == src1.size() && src1.size() == src2.size()) { if (add_to) launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size()); else 
launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size()); } else if (dest.num_samples() == 1) { if (add_to) launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size(), max_size); else launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size(), max_size); } else { if (add_to) launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size()); else launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = s1[i]*s2[k]; } } __global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { // zero initialize d before we begin. for (auto i : grid_stride_range_y(0, ks)) for (auto j : grid_stride_range(0, 1)) d[i] = 0; __syncthreads(); // loop over all the image planes for (auto i : grid_stride_range_y(0, n)) { // sum all the elements in the i-th image plane float temp = 0; for (auto j : grid_stride_range(i*bs, (i+1)*bs)) temp += s1[j]*s2[j]; auto k = i%ks; // and store the sum into d[k] warp_reduce_atomic_add(d[k], temp); } } __global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] += s1[i]*s2[k]; } } __global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { // loop over all the image planes for (auto i : grid_stride_range_y(0, n)) { // sum all the elements in the i-th image plane float temp = 0; for (auto j : grid_stride_range(i*bs, (i+1)*bs)) temp += s1[j]*s2[j]; auto k = i%ks; // and store the sum into d[k] warp_reduce_atomic_add(d[k], temp); } } void multiply_conv ( bool add_to, tensor& dest, const tensor& src1, const tensor& src2 ) { if (have_same_dimensions(dest,src1)) { DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k()); if (dest.size() == 0) return; if (add_to) launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k()); else launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()), dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k()); } else { DLIB_CASSERT(have_same_dimensions(src1,src2)); DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k()); if (dest.size() == 0) return; const auto bs = src1.nr()*src1.nc(); const auto n = src1.num_samples()*src1.k(); if (add_to) launch_kernel(_cuda_multiply_conv2_add_to, max_jobs(bs,n), dest.device(), src1.device(), n, src2.device(), bs, src1.k()); else launch_kernel(_cuda_multiply_conv2, max_jobs(bs,n), dest.device(), src1.device(), n, src2.device(), bs, src1.k()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_scale_channels_add_to(float* d, const float* src, size_t n, const float* scales, size_t bs) { 
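            // bs is the number of elements in one channel plane (nr*nc), so i/bs is the
            // flattened (sample,channel) index.  scales has shape num_samples x k x 1 x 1,
            // so every element of channel c in sample s gets multiplied by scales[s*k + c].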
for (auto i : grid_stride_range(0, n)) { auto k = i/bs; d[i] += src[i]*scales[k]; } } __global__ void _cuda_scale_channels(float* d, const float* src, size_t n, const float* scales, size_t bs) { for (auto i : grid_stride_range(0, n)) { auto k = i/bs; d[i] = src[i]*scales[k]; } } void scale_channels ( bool add_to, tensor& dest, const tensor& src, const tensor& scales ) { DLIB_CASSERT(have_same_dimensions(dest,src) && scales.num_samples() == src.num_samples() && scales.k() == src.k() && scales.nr() == 1 && scales.nc() == 1 ); if (dest.size() == 0) return; if (add_to) launch_kernel(_cuda_scale_channels_add_to,max_jobs(dest.size()), dest.device(), src.device(), src.size(), scales.device(), src.nr()*src.nc()); else launch_kernel(_cuda_scale_channels,max_jobs(dest.size()), dest.device_write_only(), src.device(), src.size(), scales.device(), src.nr()*src.nc()); } // ------------------------------------------------------------------------------------ __global__ void _cuda_mult1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]*s2[i]; } } __global__ void _cuda_mult1_add_to(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] += s1[i]*s2[i]; } } __global__ void _cuda_mult2(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] = v1*v2; } } __global__ void _cuda_mult2_add_to(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] += v1*v2; } } void multiply_zero_padded ( bool add_to, tensor& dest, const tensor& src1, const tensor& src2 ) { if (dest.size() == 0) return; // Do the simple and fast version if everything has the same dimensions if (have_same_dimensions(dest, src1) && have_same_dimensions(dest, src2)) { if (add_to) launch_kernel(_cuda_mult1_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); else launch_kernel(_cuda_mult1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); } else { if (add_to) { // Otherwise, do the more complex version with bounds checking. launch_kernel(_cuda_mult2_add_to,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } else { // Otherwise, do the more complex version with bounds checking. 
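                // _cuda_mult2_add_to walks dest's full (sample,k,row,column) index space;
                // unpack_idx() recovers the four coordinates from the flat index and
                // pack_idx() rebuilds a flat index against each source's own strides, so a
                // source that is smaller than dest in some dimension contributes an
                // implicit zero there (hence "zero padded").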
launch_kernel(_cuda_mult2,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } } } // ------------------------------------------------------------------------------------ __global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]+s2[i]; } } __global__ void _cuda_add2(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] = v1+v2; } } void add ( tensor& dest, const tensor& src1, const tensor& src2 ) { if (dest.size() == 0) return; // Do the simple and fast version if everything has the same dimensions if (have_same_dimensions(dest, src1) && have_same_dimensions(dest, src2)) { launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); } else { // Otherwise, do the more complex version with bounds checking. launch_kernel(_cuda_add2,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i] + B; } } __global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i]; } } void affine_transform( tensor& dest, const tensor& src, const float A, const float B ) { DLIB_CASSERT(dest.size()==src.size()); if (B != 0) launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B); else launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A); } void affine_transform( tensor& dest, const tensor& src, const float A ) { DLIB_CASSERT(dest.size()==src.size()); launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_rect( float* d, const float* s1, const float* s2, const float* s3, float A, float B, float C, size_t start_idx, size_t n, size_t rect_nc, size_t total_nc ) { for (auto i : grid_stride_range(0, n)) { size_t r = i/rect_nc; size_t c = i%rect_nc; size_t idx = r*total_nc + c + start_idx; d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx]; } } void affine_transform( const rectangle& rect, tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, float A, float B, float C ) { DLIB_CASSERT(dest.size() == src1.size()); DLIB_CASSERT(dest.size() == src2.size()); DLIB_CASSERT(dest.size() == src3.size()); DLIB_CASSERT(dest.num_samples() == src1.num_samples()); DLIB_CASSERT(dest.num_samples() == src2.num_samples()); 
DLIB_CASSERT(dest.num_samples() == src3.num_samples()); DLIB_CASSERT(rectangle(0,0, dest.size()/dest.num_samples()-1, dest.num_samples()-1).contains(rect)); launch_kernel(_cuda_affine_transform_rect,max_jobs(rect.area()), dest.device(), src1.device(), src2.device(), src3.device(), A, B, C, rect.left() + rect.top()*(dest.size()/dest.num_samples()), rect.area(), rect.width(), dest.size()/dest.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C; } } __global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i]; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const float A, const float B, const float C ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); if (C != 0) launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C); else launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B); } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const float A, const float B ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale) { for (auto i : grid_stride_range(0, n)) { d[i] += scale*s[i]; } } void add_scaled( tensor& dest, const float scale, const tensor& src ) { DLIB_CASSERT(dest.size()==src.size()); launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_add_cv_to_all_columns(float beta, float* dest, float alpha, const float* src, size_t size, size_t stride) { for (auto i : grid_stride_range(0, size)) { dest[i] = beta*dest[i] + alpha*src[i/stride]; } } __global__ void _cuda_add_cv_to_all_columns_no_beta(float* dest, float alpha, const float* src, size_t size, size_t stride) { for (auto i : grid_stride_range(0, size)) { dest[i] = alpha*src[i/stride]; } } void add_cv_to_all_columns( float beta, tensor& dest, float alpha, const tensor& src ) { DLIB_CASSERT(dest.num_samples() == src.num_samples() && src.num_samples() == src.size()); if (beta == 0) launch_kernel(_cuda_add_cv_to_all_columns_no_beta, max_jobs(dest.size()), dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples()); else launch_kernel(_cuda_add_cv_to_all_columns, max_jobs(dest.size()), beta, dest.device(), alpha, src.device(), dest.size(), dest.size()/dest.num_samples()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform5( float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D ) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D; } } void affine_transform( tensor& dest, const tensor& 
src1, const tensor& src2, const tensor& src3, const float A, const float B, const float C, const float D ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); DLIB_CASSERT(dest.size()==src3.size()); launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src3.device(), dest.size(), A, B, C, D); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_range( float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C ) { for (auto i : grid_stride_range(begin, end)) { d[i] = A*s1[i] + B*s2[i] + C*s3[i]; } } void affine_transform_range( size_t begin, size_t end, tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, const float A, const float B, const float C ) { DLIB_CASSERT(dest.size()==src1.size()); DLIB_CASSERT(dest.size()==src2.size()); DLIB_CASSERT(dest.size()==src3.size()); DLIB_CASSERT(begin <= end && end <= dest.size()); launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin), dest.device(), src1.device(), src2.device(), src3.device(), begin, end, A, B, C); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i]*s[i] + B[i]; } } __global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i%bs]*s[i] + B[i%bs]; } } void affine_transform( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src)); DLIB_CASSERT( ((A.num_samples()==1 && B.num_samples()==1) || (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples()))); DLIB_CASSERT( A.nr()==B.nr() && B.nr()==src.nr() && A.nc()==B.nc() && B.nc()==src.nc() && A.k() ==B.k() && B.k()==src.k(), "\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr() <<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc() <<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k() ); if (A.num_samples() == 1) { launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size()); } else { launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device()); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_compute_adam_update( size_t begin, size_t end, float* s, float* m, float* v, const float alpha, const float weight_decay, const float momentum1, const float momentum2, const float* params, const float* params_grad ) { const float eps = 1e-8; // The loop is equivalent to doing this: // m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad); // v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad); // s = -alpha*m/(sqrt(v) + eps); for (auto i : grid_stride_range(begin, end)) { float g = (weight_decay*params[i] + params_grad[i]); m[i] = momentum1*m[i] + (1-momentum1)*g; v[i] = momentum2*v[i] + (1-momentum2)*g*g; s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps); } } void compute_adam_update ( size_t begin, size_t end, tensor& s, tensor& m, tensor& v, const float t, const float 
learning_rate, const float weight_decay, const float momentum1, const float momentum2, const tensor& params, const tensor& params_grad ) { DLIB_CASSERT(s.size() == m.size() && s.size() == v.size() && s.size() == params.size() && s.size() == params_grad.size()); DLIB_CASSERT(begin <= end && end <= params.size()); const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t)); launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin), begin, end, s.device(), m.device(), v.device(), alpha, weight_decay, momentum1, momentum2, params.device(), params_grad.device()); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = A[k]*s[i] + B[k]; } } void affine_transform_conv( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src)); DLIB_CASSERT(have_same_dimensions(A, B)); DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k()); launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()), dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k()); } // ----------------------------------------------------------------------------------- __global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n) { for (auto i : grid_stride_range(0, n)) { out[i] = in[i]; for (size_t j = i+n; j < total_n; j+=n) out[i] += in[j]; } } void assign_bias_gradient ( tensor& grad, const tensor& gradient_input ) { DLIB_CASSERT( grad.num_samples() == 1 && gradient_input.k() == grad.k() && gradient_input.nr() == grad.nr() && gradient_input.nc() == grad.nc() && gradient_input.size() > 0); launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size()); } // ---------------------------------------------------------------------------------------- __global__ void _set_tensor(float* out, size_t n, const float val) { for (auto i : grid_stride_range(0, n)) out[i] = val; } void set_tensor ( tensor& t, float value ) { launch_kernel(_set_tensor, max_jobs(t.size()), t.device(), t.size(), value); } // ---------------------------------------------------------------------------------------- __global__ void _scale_tensor(float* out, size_t n, const float val) { for (auto i : grid_stride_range(0, n)) out[i] *= val; } void scale_tensor ( tensor& t, float value ) { launch_kernel(_scale_tensor, max_jobs(t.size()), t.device(), t.size(), value); } // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- __global__ void _cuda_threshold(float* d, size_t n, float thresh) { for (auto i : grid_stride_range(0, n)) { d[i] = d[i]>thresh ? 1:0; } } void threshold ( tensor& data, float thresh ) { launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh); } // ------------------------------------------------------------------------------------ __global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result) { // Parallel sum everything into local temp variables. float temp = 0; for(auto i : grid_stride_range(0, n)) temp += a[i]*b[i]; // Then do the warp reduce add thing to merge into one output value. 
warp_reduce_atomic_add(*result, temp); } void dot ( const tensor& a, const tensor& b, tensor& result, size_t idx ) { DLIB_CASSERT(a.size() == b.size()); DLIB_CASSERT(idx < result.size()); launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp) { const float p = *pp; for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = p*s[i]; } } void prelu ( tensor& dest, const tensor& src, const tensor& param ) { launch_kernel(_cuda_prelu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), param.device()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad) { const float p = *pp; float pgrad = 0; for(auto i : grid_stride_range(0, n)) { if (s[i] > 0) { out[i] += gi[i]; } else { out[i] += p*gi[i]; pgrad += gi[i]*s[i]; } } // Then do the warp reduce add thing to merge into one output value. warp_reduce_atomic_add(*ppgrad, pgrad); } void prelu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input, const tensor& param, tensor& params_grad ) { params_grad = 0; launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()), grad.device(), src.device(), gradient_input.device(), grad.size(), param.device(), params_grad.device()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_leaky_relu(const float* s, float* d, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = alpha * s[i]; } } void leaky_relu( tensor& dest, const tensor &src, const float alpha ) { launch_kernel(_cuda_leaky_relu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_leaky_relu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] = gi[i]; else out[i] = alpha * gi[i]; } } __global__ void _cuda_leaky_relu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] += gi[i]; else out[i] += alpha * gi[i]; } } void leaky_relu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input, const float alpha ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) { launch_kernel(_cuda_leaky_relu_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size(), alpha); } else { launch_kernel(_cuda_leaky_relu_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size(), alpha); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_mish(const float* s, float* d, size_t n) { for (auto i : grid_stride_range(0, n)) { const auto e = std::exp(s[i]); const auto delta = 2*e + e*e + 2; d[i] = s[i] - 2*s[i]/delta; } } void mish ( tensor& dest, const tensor& src ) { launch_kernel(_cuda_mish, max_jobs(dest.size()), src.device(), dest.device(), src.size()); } // ---------------------------------------------------------------------------------------- __device__ float 
mish_compute_gradient(float x) { if (x >= 8) return 1.f; if (x <= -8) return 0.f; const auto e = std::exp(x); const auto delta = 2*e + e*e + 2; const auto omega = 4*(x + 1) + 4*e*e + e*e*e + e*(4*x + 6); return e*omega/(delta*delta); } __global__ void _cuda_mish_gradient_inplace(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] = gi[i]*mish_compute_gradient(s[i]); } __global__ void _cuda_mish_gradient(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] += gi[i]*mish_compute_gradient(s[i]); } void mish_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_mish_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size()); else launch_kernel(_cuda_mish_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_clipped_relu(const float* s, float* d, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] < 0) d[i] = 0; else if (s[i] > alpha) d[i] = alpha; else d[i] = s[i]; } } void clipped_relu ( tensor& dest, const tensor &src, const float alpha ) { launch_kernel(_cuda_clipped_relu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_clipped_relu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0 && s[i] < alpha) out[i] = gi[i]; else out[i] = 0.f; } } __global__ void _cuda_clipped_relu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0 && s[i] < alpha) out[i] += gi[i]; } } void clipped_relu_gradient ( tensor& grad, const tensor& dest, const tensor& gradient_input, const float alpha ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_clipped_relu_gradient_inplace, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); else launch_kernel(_cuda_clipped_relu_gradient, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_elu(const float* s, float* d, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = alpha * (std::exp(s[i]) - 1.0f); } } void elu ( tensor& dest, const tensor &src, const float alpha ) { launch_kernel(_cuda_elu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_elu_gradient_inplace(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] = gi[i]; else out[i] = (alpha + s[i]) * gi[i]; } } __global__ void _cuda_elu_gradient(float* out, const float* s, const float* gi, size_t n, const float alpha) { for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) out[i] += gi[i]; else out[i] += (alpha + s[i]) * gi[i]; } } void elu_gradient ( tensor& grad, const tensor& dest, const tensor& gradient_input, const float 
alpha ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_elu_gradient_inplace, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); else launch_kernel(_cuda_elu_gradient, max_jobs(grad.size()), out, dest.device(), gi, grad.size(), alpha); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_gelu(const float* s, float* d, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s[i] * normcdf(s[i]); } } void gelu ( tensor& dest, const tensor& src ) { launch_kernel(_cuda_gelu, max_jobs(dest.size()), src.device(), dest.device(), src.size()); } // ---------------------------------------------------------------------------------------- __device__ float gelu_compute_gradient(float x) { const float beta = 1.0f / CUDART_SQRT_2PI; const float cdf = normcdf(x); const float pdf = beta*std::exp(-0.5f*x*x); return cdf + x * pdf; } __global__ void _cuda_gelu_gradient_inplace(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] = gi[i]*gelu_compute_gradient(s[i]); } __global__ void _cuda_gelu_gradient(float* out, const float* s, const float* gi, size_t n) { for (auto i : grid_stride_range(0, n)) out[i] += gi[i]*gelu_compute_gradient(s[i]); } void gelu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input ) { float* out = grad.device(); const float* gi = gradient_input.device(); if (out == gi) launch_kernel(_cuda_gelu_gradient_inplace, max_jobs(grad.size()), out, src.device(), gi, grad.size()); else launch_kernel(_cuda_gelu_gradient, max_jobs(grad.size()), out, src.device(), gi, grad.size()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_resize_bilinear(size_t dsize, size_t dchan_size, size_t dnc, float* d, size_t schan_size, int snr, int snc, const float* s, const float x_scale, const float y_scale) { for(auto i : grid_stride_range(0, dsize)) { const int idx = i%dchan_size; const int channel = i/dchan_size; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; float tl = s[sidx+top*snc+left]; float tr = s[sidx+top*snc+right]; float bl = s[sidx+bottom*snc+left]; float br = s[sidx+bottom*snc+right]; float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); d[i] = temp; } } __global__ void _cuda_resize_bilinear_strided(size_t dsize, size_t dchan_size, size_t dnc, float* d, size_t schan_size, int snr, int snc, const float* s, const float x_scale, const float y_scale, size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided ) { for(auto i : grid_stride_range(0, dsize)) { const int idx = i%dchan_size; const int channel = i/dchan_size; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const int didx = channel*dest_chan_size_strided + r*dest_row_stride+c; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x 
- left; float tl = s[sidx+top*src_row_stride+left]; float tr = s[sidx+top*src_row_stride+right]; float bl = s[sidx+bottom*src_row_stride+left]; float br = s[sidx+bottom*src_row_stride+right]; float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); d[didx] = temp; } } void resize_bilinear ( tensor& dest, long long dest_row_stride, long long dest_channel_stride, const tensor& src, long long src_row_stride, long long src_channel_stride ) { DLIB_CASSERT(is_same_object(dest, src)==false); DLIB_CASSERT(dest.num_samples() == src.num_samples()); DLIB_CASSERT(dest.k() == src.k()); if (dest.size() == 0 || src.size() == 0) return; const float x_scale = (src.nc()-1)/(float)std::max<long>((dest.nc()-1),1); const float y_scale = (src.nr()-1)/(float)std::max<long>((dest.nr()-1),1); if (dest.nc() == dest_row_stride && dest.nr()*dest.nc()==dest_channel_stride && src.nc() == src_row_stride && src.nr()*src.nc()==src_channel_stride) { launch_kernel(_cuda_resize_bilinear, dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(), src.nr()*src.nc(), src.nr(), src.nc(), src.device(), x_scale, y_scale); } else { launch_kernel(_cuda_resize_bilinear_strided, dest.size(), dest.nr()*dest.nc(), dest.nc(), dest.device(), src_channel_stride, src.nr(), src.nc(), src.device(), x_scale, y_scale, dest_row_stride, src_row_stride, dest_channel_stride); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_resize_bilinear_gradient(size_t dsize, size_t dchan_size, size_t dnc, const float* d, size_t schan_size, int snr, int snc, float* s, const float x_scale, const float y_scale) { for(auto i : grid_stride_range(0, dsize)) { const float tmp = d[i]; const int idx = i%dchan_size; const int channel = i/dchan_size; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; atomicAdd(s+sidx+top*snc+left, tmp*(1-tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+top*snc+right, tmp*(1-tb_frac)*(lr_frac)); atomicAdd(s+sidx+bottom*snc+left, tmp*(tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+bottom*snc+right, tmp*(tb_frac)*(lr_frac)); } } __global__ void _cuda_resize_bilinear_gradient_strided(size_t dsize, size_t dchan_size, size_t dnc, const float* d, size_t schan_size, int snr, int snc, float* s, const float x_scale, const float y_scale, size_t dest_row_stride, size_t src_row_stride, size_t dest_chan_size_strided ) { for(auto i : grid_stride_range(0, dsize)) { const int idx = i%dchan_size; const int channel = i/dchan_size; const int didx = channel*dest_chan_size_strided; const int sidx = channel*schan_size; const int r = idx/dnc; const int c = idx%dnc; const float tmp = d[didx + r*dest_row_stride+c]; const float y = r*y_scale; const int top = static_cast<int>(::floorf(y)); const int bottom = ::min(top+1, snr-1); const float tb_frac = y - top; const float x = c*x_scale; const int left = static_cast<int>(::floorf(x)); const int right = ::min(left+1, snc-1); const float lr_frac = x - left; atomicAdd(s+sidx+top*src_row_stride+left, tmp*(1-tb_frac)*(1-lr_frac)); atomicAdd(s+sidx+top*src_row_stride+right, tmp*(1-tb_frac)*(lr_frac)); atomicAdd(s+sidx+bottom*src_row_stride+left, tmp*(tb_frac)*(1-lr_frac)); 
atomicAdd(s+sidx+bottom*src_row_stride+right, tmp*(tb_frac)*(lr_frac)); } } void resize_bilinear_gradient ( tensor& grad, long long grad_row_stride, long long grad_channel_stride, const tensor& gradient_input, long long gradient_input_row_stride, long long gradient_input_channel_stride ) { DLIB_CASSERT(is_same_object(grad, gradient_input)==false); DLIB_CASSERT(gradient_input.num_samples() == grad.num_samples()); DLIB_CASSERT(gradient_input.k() == grad.k()); if (grad.size() == 0 || gradient_input.size() == 0) return; const float x_scale = (grad.nc()-1)/(float)std::max<long>((gradient_input.nc()-1),1); const float y_scale = (grad.nr()-1)/(float)std::max<long>((gradient_input.nr()-1),1); if (grad.nc() == grad_row_stride && grad.nr()*grad.nc()==grad_channel_stride && gradient_input.nc() == gradient_input_row_stride && gradient_input.nr()*gradient_input.nc()==gradient_input_channel_stride) { launch_kernel(_cuda_resize_bilinear_gradient, gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(), grad.nr()*grad.nc(), grad.nr(), grad.nc(), grad.device(), x_scale, y_scale); } else { launch_kernel(_cuda_resize_bilinear_gradient_strided, gradient_input.size(), gradient_input.nr()*gradient_input.nc(), gradient_input.nc(), gradient_input.device(), grad_channel_stride, grad.nr(), grad.nc(), grad.device(), x_scale, y_scale, gradient_input_row_stride, grad_row_stride, gradient_input_channel_stride); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_layer_normalize(float* out, const float* s, float* m, float* v, const float* g, const float* b, float eps, size_t ns, size_t num) { // compute means and sum of squares for (auto n : grid_stride_range_y(0, ns)) { auto p = s + n * num; float means = 0; float invstds = 0; for (auto i : grid_stride_range(0, num)) { means += p[i]; invstds += p[i] * p[i]; } warp_reduce_atomic_add(m[n], means/num); warp_reduce_atomic_add(v[n], invstds/num); } __syncthreads(); // compute variances for (auto n : grid_stride_range_y(0, ns)) { for (auto i : grid_stride_range(0, 1)) { auto var = v[n] - m[n] * m[n]; v[n] = 1.0f / std::sqrt(var + eps); } } __syncthreads(); for (auto n : grid_stride_range_y(0, ns)) { for (auto i : grid_stride_range(0, num)) { const float val = (s[n*num+i]-m[n])*v[n]; out[n*num+i] = val*g[i]+b[i]; } } } __global__ void _cuda_layer_normalize_gradient(float* out, float* gg, float* bg, const float* s, const float* gi, const float* m, const float* v, const float* g, float* dm, float* dv, float eps, size_t ns, size_t num) { for (auto n : grid_stride_range_y(0, ns)) { float temp_dv = 0; for (auto i : grid_stride_range(0, num)) { auto idx = n*num+i; const float x_hat = (s[idx] - m[n])*v[n]; bg[i] += gi[idx]; gg[i] += gi[idx]*x_hat; const float dx = gi[idx] * g[n]; temp_dv += dx*(s[idx] - m[n])*-0.5*v[n]*v[n]*v[n]; } warp_reduce_atomic_add(dv[n], temp_dv); } __syncthreads(); for (auto n : grid_stride_range_y(0, ns)) { float temp_dm = 0; for (auto i : grid_stride_range(0, num)) { auto idx = n*num+i; const float dx = gi[idx]*g[i]; temp_dm += dx*-v[n] + dv[n] * -2*(s[idx] - m[n])/num; } warp_reduce_atomic_add(dm[n], temp_dm); } __syncthreads(); for (auto n : grid_stride_range_y(0, ns)) { for (auto i : grid_stride_range(0, num)) { auto idx = n*num+i; const float dx = gi[idx]*g[i]; out[idx] += dx*v[n] + dv[n] * 2*(s[idx] - m[n])/num + dm[n]/num; } } } void layer_normalize ( const double eps, resizable_tensor& dest, resizable_tensor& means, 
resizable_tensor& invstds, const tensor& src, const tensor& gamma, const tensor& beta ) { const long num = src.k() * src.nr() * src.nc(); DLIB_CASSERT( have_same_dimensions(gamma, beta) && src.k() == gamma.k() && src.nr() == gamma.nr() && src.nc() == gamma.nc() && eps > 0, "\ngamma.k(): " << gamma.k() << "\ngamma.nr(): " << gamma.nr() << "\ngamma.nc(): " << gamma.nc() << "\nbeta.k(): " << beta.k() << "\nbeta.nr(): " << beta.nr() << "\nbeta.nc(): " << beta.nc() << "\nsrc.k(): " << src.k() << "\nsrc.nr(): " << src.nr() << "\nsrc.nc(): " << src.nc() << "\neps: " << eps ); dest.copy_size(src); means.set_size(src.num_samples()); invstds.set_size(src.num_samples()); means = 0; invstds = 0; launch_kernel(_cuda_layer_normalize, max_jobs(num, src.num_samples()), dest.device(), src.device(), means.device(), invstds.device(), gamma.device(), beta.device(), eps, src.num_samples(), num); } void layer_normalize_gradient ( const double eps, const tensor& gradient_input, const tensor& means, const tensor& invstds, const tensor& src, const tensor& gamma, tensor& src_grad, tensor& gamma_grad, tensor& beta_grad ) { const long num = src.k() * src.nr() * src.nc(); DLIB_CASSERT(src.num_samples() == means.size()); DLIB_CASSERT(src.num_samples() == invstds.size()); DLIB_CASSERT(src.k() == gamma.k()); DLIB_CASSERT(src.nr() == gamma.nr()); DLIB_CASSERT(src.nc() == gamma.nc()); DLIB_CASSERT(have_same_dimensions(gradient_input, src)); DLIB_CASSERT(have_same_dimensions(gradient_input, src_grad)); DLIB_CASSERT(have_same_dimensions(gamma_grad, gamma)); DLIB_CASSERT(have_same_dimensions(gamma_grad, beta_grad)); DLIB_CASSERT(eps > 0); beta_grad = 0; gamma_grad = 0; resizable_tensor dvars, dmeans; dvars.copy_size(invstds); dmeans.copy_size(means); dvars = 0; dmeans = 0; launch_kernel(_cuda_layer_normalize_gradient, max_jobs(num, src.num_samples()), src_grad.device(), gamma_grad.device(), beta_grad.device(), src.device(), gradient_input.device(), means.device(), invstds.device(), gamma.device(), dmeans.device(), dvars.device(), eps, src.num_samples(), num); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_copy_tensor_add_to (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size) { for(auto i : grid_stride_range(0, size)) { size_t blk = i/block_size; size_t j = i%block_size; dest[blk*dest_stride + j] += src[blk*src_stride + j]; } } __global__ void _cuda_copy_tensor (float* dest, size_t size, const float* src, size_t dest_stride, size_t src_stride, size_t block_size) { for(auto i : grid_stride_range(0, size)) { size_t blk = i/block_size; size_t j = i%block_size; dest[blk*dest_stride + j] = src[blk*src_stride + j]; } } void copy_tensor( bool add_to, tensor& dest, size_t dest_k_offset, const tensor& src, size_t src_k_offset, size_t count_k ) { const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k()); const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k()); const size_t block_size = count_k * dest.nc() * dest.nr(); DLIB_CASSERT(dest.num_samples() == src.num_samples() && dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size"); DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor"); DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor"); float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr(); const float* src_p = src.device() + src_k_offset * src.nc() * 
src.nr();; if (add_to) { launch_kernel(_cuda_copy_tensor_add_to, max_jobs(dest.size()), dest_p, block_size*dest.num_samples(), src_p, dest_sample_size, src_sample_size, block_size); } else { launch_kernel(_cuda_copy_tensor, max_jobs(dest.size()), dest_p, block_size*dest.num_samples(), src_p, dest_sample_size, src_sample_size, block_size); } } // ---------------------------------------------------------------------------------------- __device__ float cuda_log1pexp(float x) { if (x <= -18) return std::exp(x); else if (-18 < x && x <= 9) return std::log1pf(std::exp(x)); else if (9 < x && x <= 16) return x + expf(-x); else return x; } __global__ void _cuda_compute_loss_binary_log_per_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale) { float loss = 0; for(auto i : grid_stride_range(0, n)) { const float y = truth[i]; if (y > 0.f) { const float temp = cuda_log1pexp(-out_data[i]); loss += y*temp; g[i] = y*scale*(g[i]-1); } else if (y < 0.f) { const float temp = -(-out_data[i]-cuda_log1pexp(-out_data[i])); loss += -y*temp; g[i] = -y*scale*g[i]; } else { g[i] = 0.f; } } warp_reduce_atomic_add(*loss_out, loss); } // ---------------------------------------------------------------------------------------- __device__ float cuda_safe_log(float x, float epsilon = 1e-10) { // Prevent trying to calculate the logarithm of a very small number (let alone zero) if (x >= epsilon) return ::log(x); else return ::log(epsilon); } __global__ void _cuda_compute_loss_multiclass_log_per_pixel(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, uint16_t label_to_ignore, const float scale) { float loss = 0; for(auto i : grid_stride_range(0, n)) { const size_t k = (i/plane_size)%nk; const size_t idx = (i%plane_size) + plane_size*(i/sample_size); const size_t y = truth[idx]; if (k == y) { loss -= cuda_safe_log(g[i]); g[i] = scale*(g[i] - 1); } else if (y == label_to_ignore) { g[i] = 0.f; } else { g[i] = scale*g[i]; } } warp_reduce_atomic_add(*loss_out, loss); } __global__ void _cuda_compute_loss_multiclass_log_per_pixel_weighted(float* loss_out, float* g, const uint16_t* truth, size_t n, size_t plane_size, size_t sample_size, size_t nk, const float* weights, const float scale) { float loss = 0; for(auto i : grid_stride_range(0, n)) { const size_t k = (i/plane_size)%nk; const size_t idx = (i%plane_size) + plane_size*(i/sample_size); const size_t y = truth[idx]; const float weight = weights[idx]; if (k == y) { loss -= weight*cuda_safe_log(g[i]); g[i] = weight*scale*(g[i] - 1); } else { g[i] = weight*scale*g[i]; } } warp_reduce_atomic_add(*loss_out, loss); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_compute_loss_mean_squared_per_channel_and_pixel(float* loss_out, float* g, const float* truth, const float* out_data, size_t n, const float scale) { float loss = 0; for (auto i : grid_stride_range(0, n)) { const float y = truth[i]; const float temp = y - out_data[i]; loss += temp * temp; g[i] = -temp * scale; } warp_reduce_atomic_add(*loss_out, loss); } // ---------------------------------------------------------------------------------------- void compute_loss_binary_log_per_pixel:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const float> truth_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float))); sigmoid(gradient, subnetwork_output); // The loss we 
output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_binary_log_per_pixel, max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } void compute_loss_multiclass_log_per_pixel:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const uint16_t> truth_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float))); softmax(gradient, subnetwork_output); static const uint16_t label_to_ignore = std::numeric_limits<uint16_t>::max(); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel, max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), label_to_ignore, scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } void compute_loss_multiclass_log_per_pixel_weighted:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const uint16_t> truth_buffer, cuda_data_ptr<const float> weights_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float))); softmax(gradient, subnetwork_output); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_multiclass_log_per_pixel_weighted, max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), gradient.size(), gradient.nr()*gradient.nc(), gradient.nr()*gradient.nc()*gradient.k(), gradient.k(), weights_buffer.data(), scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } void compute_loss_mean_squared_per_channel_and_pixel:: do_work( cuda_data_ptr<float> loss_work_buffer, cuda_data_ptr<const float> truth_buffer, const tensor& subnetwork_output, tensor& gradient, double& loss ) { CHECK_CUDA(cudaMemset(loss_work_buffer, 0, sizeof(float))); // The loss we output is the average loss over the mini-batch, and also over each element of the matrix output. const double scale = 1.0 / (subnetwork_output.num_samples() * subnetwork_output.k() * subnetwork_output.nr() * subnetwork_output.nc()); launch_kernel(_cuda_compute_loss_mean_squared_per_channel_and_pixel , max_jobs(gradient.size()), loss_work_buffer.data(), gradient.device(), truth_buffer.data(), subnetwork_output.device(), gradient.size(), scale); float floss; dlib::cuda::memcpy(&floss, loss_work_buffer); loss = scale*floss; } // ---------------------------------------------------------------------------------------- } }
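// ----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the dlib file above): the _cuda_compute_adam_update
// kernel documents its own math in a comment, and the host wrapper pre-computes the
// bias-corrected step size `alpha`.  The scalar C++ restatement below makes that math easy
// to check by hand; the function name `adam_step` and the default hyper-parameter values
// are invented for this example and are not dlib API.
#include <cmath>
#include <cstdio>

static float adam_step(float param, float grad, float& m, float& v, float t,
                       float learning_rate = 0.001f, float weight_decay = 0.0005f,
                       float momentum1 = 0.9f, float momentum2 = 0.999f)
{
    const float eps = 1e-8f;
    // Same bias correction the host code applies before launching the kernel.
    const float alpha = learning_rate*std::sqrt(1 - std::pow(momentum2, t))
                                     /(1 - std::pow(momentum1, t));
    const float g = weight_decay*param + grad;   // L2-regularized gradient
    m = momentum1*m + (1 - momentum1)*g;         // first moment estimate
    v = momentum2*v + (1 - momentum2)*g*g;       // second moment estimate
    return -alpha*m/(std::sqrt(v) + eps);        // the value the kernel stores in s[i]
}

int main()
{
    float m = 0, v = 0;
    // At t == 1 the bias correction cancels the (1-momentum) factors, so the very first
    // step is approximately -learning_rate*sign(gradient), i.e. about -0.001 here.
    std::printf("first adam step: %f\n", adam_step(0.5f, 0.2f, m, v, /*t=*/1));
    return 0;
}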
a3b57d06e64d80ef90a15a040967223d93dd8223.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Marina Doric
//Final Project
#include <stdio.h>
#include "timerc.h"
#include "test.cuh"
#include "witness.cuh"
#include "serial.cuh"

//Main function ***********************************************************************
int main(){
    //Assume this has been run while pulling info from file "DNA.txt"
    //Set up the data
    int tsize = 2048*2048*256; //largest int allowed
    char* text = (char*) malloc(tsize*sizeof(char)); //text

    //Process text
    int i=0;
    char c = getchar();
    while ((i<tsize)&&(c != EOF)){
        if ((c != 13)&&(c != 10)&&(c != 0)){
            text[i] = c;
            i += 1;
        }
        c = getchar();
    }
    //If the text was smaller than the ascribed size, reset.
    tsize = i;

    hipLaunchKernelGGL(( warmup), dim3(1),dim3(1), 0, 0, ); //Warmup the GPU to ensure accurate timing
    gerror(hipPeekAtLastError());

    //Now, let's run tests on the GPU versions and compare to CPU results
    //********************************************************************
    FILE *data;

    //Was going to get run times for KMP, but KMP doesn't work. I was mistaken.
    /*//Get run times for KMP
    //The multiple kernel version encounters an illegal memory access when the pattern size is 65537 or larger. So, we will restrict it.
    int maxp = 65537;
    int inc = 4; //To keep the run time of testing down
    int size = 10; //Run on full thing
    //Test: multiple kernel version and cpu witness
    data = fopen("KMPvsMultiple.txt", "w");
    test(text, tsize, 2, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/
    //FROM HERE ON, KMP WAS COMMENTED OUT to speed up data collection

    //Second, test constant memory on large pattern sizes.
    int size = 2048*32; //Constraint of constant memory version
    int maxp = size;
    int inc = 50; //Large to speed up data collection
    //Test: Multiple Kernel Constant Mem on largest patterns possible
    data = fopen("Constant.txt", "w");
    test(text, size, 2, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data); //*/

    //Next, Generic comparisons and tests on limited text for all four versions
    //Test: synced parallel version w/global tree and cpu witness
    maxp = 2048; //Constraint of synced versions
    inc = 4; //Do pattern sizes 4,8,12,16,...,2048
    data = fopen("Synced_WitCPU.txt", "w");
    test(text, size, 2, 1, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: synced parallel version w/shared tree and cpu witness
    data = fopen("SyncedShared_WitCPU.txt", "w");
    test(text, tsize, 2, 2, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: synced parallel version w/global tree and gpu witness
    data = fopen("Synced_WitGPU.txt", "w");
    test(text, tsize, 1, 1, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: synced parallel version w/shared tree and gpu witness
    data = fopen("SyncedShared_WitGPU.txt", "w");
    test(text, tsize, 1, 2, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);//
    fclose(data);//*/

    //Test: multiple kernel version and cpu witness
    data = fopen("Multiple_WitCPU.txt", "w");
    test(text, tsize, 2, 3, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: multiple kernel version and gpu witness
    data = fopen("Multiple_WitGPU.txt", "w");
    test(text, tsize, 1, 3, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data); //*/

    //Test: multiple kernel version and cpu witness
    //pattern sizes 8,16,24,32...2048
    data = fopen("MultipleConstant_WitCPU.txt", "w");
    test(text, tsize, 2, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: multiple kernel version and gpu witness
    //pattern sizes 8,16,24,32...2048
    data = fopen("MultipleConstant_WitGPU.txt", "w");
    test(text, tsize, 1, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/
}
a3b57d06e64d80ef90a15a040967223d93dd8223.cu
//Marina Doric
//Final Project
#include <stdio.h>
#include "timerc.h"
#include "test.cuh"
#include "witness.cuh"
#include "serial.cuh"

//Main function ***********************************************************************
int main(){
    //Assume this has been run while pulling info from file "DNA.txt"
    //Set up the data
    int tsize = 2048*2048*256; //largest int allowed
    char* text = (char*) malloc(tsize*sizeof(char)); //text

    //Process text
    int i=0;
    char c = getchar();
    while ((i<tsize)&&(c != EOF)){
        if ((c != 13)&&(c != 10)&&(c != 0)){
            text[i] = c;
            i += 1;
        }
        c = getchar();
    }
    //If the text was smaller than the ascribed size, reset.
    tsize = i;

    warmup<<<1,1>>>(); //Warmup the GPU to ensure accurate timing
    gerror(cudaPeekAtLastError());

    //Now, let's run tests on the GPU versions and compare to CPU results
    //********************************************************************
    FILE *data;

    //Was going to get run times for KMP, but KMP doesn't work. I was mistaken.
    /*//Get run times for KMP
    //The multiple kernel version encounters an illegal memory access when the pattern size is 65537 or larger. So, we will restrict it.
    int maxp = 65537;
    int inc = 4; //To keep the run time of testing down
    int size = 10; //Run on full thing
    //Test: multiple kernel version and cpu witness
    data = fopen("KMPvsMultiple.txt", "w");
    test(text, tsize, 2, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/
    //FROM HERE ON, KMP WAS COMMENTED OUT to speed up data collection

    //Second, test constant memory on large pattern sizes.
    int size = 2048*32; //Constraint of constant memory version
    int maxp = size;
    int inc = 50; //Large to speed up data collection
    //Test: Multiple Kernel Constant Mem on largest patterns possible
    data = fopen("Constant.txt", "w");
    test(text, size, 2, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data); //*/

    //Next, Generic comparisons and tests on limited text for all four versions
    //Test: synced parallel version w/global tree and cpu witness
    maxp = 2048; //Constraint of synced versions
    inc = 4; //Do pattern sizes 4,8,12,16,...,2048
    data = fopen("Synced_WitCPU.txt", "w");
    test(text, size, 2, 1, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: synced parallel version w/shared tree and cpu witness
    data = fopen("SyncedShared_WitCPU.txt", "w");
    test(text, tsize, 2, 2, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: synced parallel version w/global tree and gpu witness
    data = fopen("Synced_WitGPU.txt", "w");
    test(text, tsize, 1, 1, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: synced parallel version w/shared tree and gpu witness
    data = fopen("SyncedShared_WitGPU.txt", "w");
    test(text, tsize, 1, 2, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);//
    fclose(data);//*/

    //Test: multiple kernel version and cpu witness
    data = fopen("Multiple_WitCPU.txt", "w");
    test(text, tsize, 2, 3, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: multiple kernel version and gpu witness
    data = fopen("Multiple_WitGPU.txt", "w");
    test(text, tsize, 1, 3, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data); //*/

    //Test: multiple kernel version and cpu witness
    //pattern sizes 8,16,24,32...2048
    data = fopen("MultipleConstant_WitCPU.txt", "w");
    test(text, tsize, 2, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/

    //Test: multiple kernel version and gpu witness
    //pattern sizes 8,16,24,32...2048
    data = fopen("MultipleConstant_WitGPU.txt", "w");
    test(text, tsize, 1, 4, data, maxp, inc);
    printf("\n\n");
    fflush(stdout);
    fclose(data);//*/
}
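// ----------------------------------------------------------------------------------------
// Minimal sketch of the launch-syntax difference between the two versions of this file
// above: CUDA's triple-chevron launch versus the hipLaunchKernelGGL() form that hipify
// emits for it.  The kernel, sizes and values here are invented purely for illustration.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill(int* out, int n, int value)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = value;
}

int main()
{
    const int n = 256;
    int* d = nullptr;
    cudaMalloc(&d, n*sizeof(int));

    // CUDA form: <<<grid, block, sharedMemBytes, stream>>>, with the last two defaulted.
    fill<<<(n + 127)/128, 128>>>(d, n, 7);

    // The hipified file above expresses the same launch as:
    //   hipLaunchKernelGGL((fill), dim3((n + 127)/128), dim3(128), 0, 0, d, n, 7);
    // i.e. grid, block, dynamic shared-memory bytes, stream, then the kernel arguments.

    cudaDeviceSynchronize();
    cudaFree(d);
    std::printf("done\n");
    return 0;
}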
a0df7d4444e3e421bfa706f440a4684af95d0568.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************\ * --- Practical Course: GPU Programming in Computer Vision --- * * time: winter term 2012/13 / March 11-18, 2013 * * project: gradient * file: gradient.cu * * \******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/ const char* studentLogin = "p116"; const char* studentName = "Arash Bakhtiari"; const int studentID = 03625141; /****************************************************************************\ * * In this file the following methods have to be edited or completed: * * derivativeY_sm_d(const float *inputImage, ... ) * derivativeY_sm_d(const float3 *inputImage, ... ) * gradient_magnitude_d(const float *inputImage, ... ) * gradient_magnitude_d(const float3 *inputImage, ... ) * \****************************************************************************/ #include "gradient.cuh" #define BW 16 #define BH 16 const char* getStudentLogin() { return studentLogin; }; const char* getStudentName() { return studentName; }; int getStudentID() { return studentID; }; bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; }; bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; }; __global__ void derivativeX_sm_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float u[BW+2][BH]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1); } __syncthreads(); if (x < iWidth && y < iHeight) *((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y]); } __global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; float3 imgValue; __shared__ float3 u[BW+2][BH]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1); } __syncthreads(); if (x < iWidth && y < iHeight) { imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x); imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y); imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z); *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue; } } __global__ void derivativeY_sm_d(const float 
*inputImage, float *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float u[BW][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x][threadIdx.y + 1] = *((float*) ((char*) inputImage + y * iPitchBytes) + x); if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x][threadIdx.y + 2] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x][threadIdx.y + 2] = *((float*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); if (x < iWidth && y < iHeight) *((float*) (((char*) outputImage) + y * iPitchBytes) + x) = 0.5f * (u[threadIdx.x][threadIdx.y + 2] - u[threadIdx.x][threadIdx.y]); } __global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; float3 imgValue; __shared__ float3 u[BW][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x][threadIdx.y + 1] = *((float3*) ((char*) inputImage + y * iPitchBytes) + x); if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float3*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x][threadIdx.y + 2] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x][threadIdx.y + 2] = *((float3*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); if (x < iWidth && y < iHeight) { imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x); imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y); imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z); *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue; } } __global__ void gradient_magnitude_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float u[BW+2][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1); if (y == 0) u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x+1][threadIdx.y] = *((float*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x+1][threadIdx.y + 2] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x+1][threadIdx.y + 2] = *((float*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); float tempDerX; float tempDerY; if (x < iWidth && y < iHeight) { tempDerX = 0.5f*(u[threadIdx.x + 2][threadIdx.y+1]-u[threadIdx.x][threadIdx.y+1]); tempDerY = 
0.5f*(u[threadIdx.x+1][threadIdx.y + 2] - u[threadIdx.x+1][threadIdx.y]); *((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = sqrt( tempDerX*tempDerX + tempDerY*tempDerY ); } } __global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { // // ### implement me ### const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float3 u[BW+2][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1); if (y == 0) u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x+1][threadIdx.y] = *((float3*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x+1][threadIdx.y + 2] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x+1][threadIdx.y + 2] = *((float3*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); float3 normValue; float3 xValue; float3 yValue; if (x < iWidth && y < iHeight) { // x derivatives xValue.x = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].x - u[threadIdx.x][threadIdx.y+1].x); xValue.y = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].y - u[threadIdx.x][threadIdx.y+1].y); xValue.z = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].z - u[threadIdx.x][threadIdx.y+1].z); // y derivatives yValue.x = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].x - u[threadIdx.x+1][threadIdx.y].x); yValue.y = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].y - u[threadIdx.x+1][threadIdx.y].y); yValue.z = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].z - u[threadIdx.x+1][threadIdx.y].z); normValue.x = sqrt(xValue.x*xValue.x + yValue.x*yValue.x); normValue.y = sqrt(xValue.y*xValue.y + yValue.y*yValue.y); normValue.z = sqrt(xValue.z*xValue.z + yValue.z*yValue.z); *((float3*) (((char*) outputImage) + y * iPitchBytes) + x) = normValue ; } } void gpu_derivative_sm_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, int iSpectrum, int mode) { size_t iPitchBytes; float *inputImage_d = 0, *outputImage_d = 0; dim3 blockSize(BW, BH); dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) ); //dim3 smSize(BW+2,BH); if(iSpectrum == 1) { cutilSafeCall( hipMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) ); cutilSafeCall( hipMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) ); cutilSafeCall( hipMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, hipMemcpyHostToDevice) ); if (mode == 0) hipLaunchKernelGGL(( derivativeX_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 1) hipLaunchKernelGGL(( derivativeY_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 2) hipLaunchKernelGGL(( gradient_magnitude_d), dim3(gridSize), dim3(blockSize), 0, 0, inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes); cutilSafeCall( 
hipDeviceSynchronize() ); cutilSafeCall( hipMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, hipMemcpyDeviceToHost) ); } else if(iSpectrum == 3) { cutilSafeCall( hipMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) ); cutilSafeCall( hipMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) ); cutilSafeCall( hipMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, hipMemcpyHostToDevice) ); if (mode == 0) hipLaunchKernelGGL(( derivativeX_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 1) hipLaunchKernelGGL(( derivativeY_sm_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 2) hipLaunchKernelGGL(( gradient_magnitude_d), dim3(gridSize), dim3(blockSize), 0, 0, (float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes); cutilSafeCall( hipDeviceSynchronize() ); cutilSafeCall( hipMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, hipMemcpyDeviceToHost) ); } cutilSafeCall( hipFree(inputImage_d) ); cutilSafeCall( hipFree(outputImage_d) ); }
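/*
 * Illustrative sketch, not part of the original gradient sources: a plain CPU
 * reference for the same central-difference scheme used by derivativeX_sm_d,
 * derivativeY_sm_d and gradient_magnitude_d above, assuming a single-channel
 * row-major image with replicated borders (matching the kernels' edge handling).
 * The function name and signature are hypothetical, and sqrtf is assumed to be
 * available through the existing includes; intended only for spot-checking GPU
 * results on small images.
 */
void cpu_gradient_magnitude_reference(const float *in, float *out, int w, int h)
{
  for (int y = 0; y < h; y++)
    for (int x = 0; x < w; x++) {
      /* replicate the edge pixel at the image border, as the kernels do */
      const int xm = (x > 0) ? x - 1 : x, xp = (x < w - 1) ? x + 1 : x;
      const int ym = (y > 0) ? y - 1 : y, yp = (y < h - 1) ? y + 1 : y;
      const float dx = 0.5f * (in[y * w + xp] - in[y * w + xm]);  /* central difference in x */
      const float dy = 0.5f * (in[yp * w + x] - in[ym * w + x]);  /* central difference in y */
      out[y * w + x] = sqrtf(dx * dx + dy * dy);                  /* gradient magnitude */
    }
}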
a0df7d4444e3e421bfa706f440a4684af95d0568.cu
/****************************************************************************\ * --- Practical Course: GPU Programming in Computer Vision --- * * time: winter term 2012/13 / March 11-18, 2013 * * project: gradient * file: gradient.cu * * \******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/ const char* studentLogin = "p116"; const char* studentName = "Arash Bakhtiari"; const int studentID = 03625141; /****************************************************************************\ * * In this file the following methods have to be edited or completed: * * derivativeY_sm_d(const float *inputImage, ... ) * derivativeY_sm_d(const float3 *inputImage, ... ) * gradient_magnitude_d(const float *inputImage, ... ) * gradient_magnitude_d(const float3 *inputImage, ... ) * \****************************************************************************/ #include "gradient.cuh" #define BW 16 #define BH 16 const char* getStudentLogin() { return studentLogin; }; const char* getStudentName() { return studentName; }; int getStudentID() { return studentID; }; bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; }; bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; }; __global__ void derivativeX_sm_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float u[BW+2][BH]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1); } __syncthreads(); if (x < iWidth && y < iHeight) *((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y]); } __global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; float3 imgValue; __shared__ float3 u[BW+2][BH]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1); } __syncthreads(); if (x < iWidth && y < iHeight) { imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x); imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y); imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z); *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue; } } __global__ void derivativeY_sm_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = 
blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float u[BW][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x][threadIdx.y + 1] = *((float*) ((char*) inputImage + y * iPitchBytes) + x); if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x][threadIdx.y + 2] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x][threadIdx.y + 2] = *((float*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); if (x < iWidth && y < iHeight) *((float*) (((char*) outputImage) + y * iPitchBytes) + x) = 0.5f * (u[threadIdx.x][threadIdx.y + 2] - u[threadIdx.x][threadIdx.y]); } __global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; float3 imgValue; __shared__ float3 u[BW][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x][threadIdx.y + 1] = *((float3*) ((char*) inputImage + y * iPitchBytes) + x); if (y == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x][threadIdx.y] = *((float3*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x][threadIdx.y + 2] = u[threadIdx.x][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x][threadIdx.y + 2] = *((float3*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); if (x < iWidth && y < iHeight) { imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x); imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y); imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z); *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue; } } __global__ void gradient_magnitude_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float u[BW+2][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1); if (y == 0) u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x+1][threadIdx.y] = *((float*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x+1][threadIdx.y + 2] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x+1][threadIdx.y + 2] = *((float*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); float tempDerX; float tempDerY; if (x < iWidth && y < iHeight) { tempDerX = 0.5f*(u[threadIdx.x + 2][threadIdx.y+1]-u[threadIdx.x][threadIdx.y+1]); tempDerY = 0.5f*(u[threadIdx.x+1][threadIdx.y + 2] - u[threadIdx.x+1][threadIdx.y]); 
*((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = sqrt( tempDerX*tempDerX + tempDerY*tempDerY ); } } __global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage, int iWidth, int iHeight, size_t iPitchBytes) { // // ### implement me ### const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float3 u[BW+2][BH+2]; if (x < iWidth && y < iHeight) { u[threadIdx.x+1][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x); if (x == 0) u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1); if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1]; else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1); if (y == 0) u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == 0) u[threadIdx.x+1][threadIdx.y] = *((float3*) ((char*) inputImage + (y - 1) * iPitchBytes) + x); if (y == (iHeight - 1)) u[threadIdx.x+1][threadIdx.y + 2] = u[threadIdx.x+1][threadIdx.y + 1]; else if (threadIdx.y == blockDim.y - 1) u[threadIdx.x+1][threadIdx.y + 2] = *((float3*) ((char*) inputImage + (y + 1) * iPitchBytes) + x); } __syncthreads(); float3 normValue; float3 xValue; float3 yValue; if (x < iWidth && y < iHeight) { // x derivatives xValue.x = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].x - u[threadIdx.x][threadIdx.y+1].x); xValue.y = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].y - u[threadIdx.x][threadIdx.y+1].y); xValue.z = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].z - u[threadIdx.x][threadIdx.y+1].z); // y derivatives yValue.x = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].x - u[threadIdx.x+1][threadIdx.y].x); yValue.y = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].y - u[threadIdx.x+1][threadIdx.y].y); yValue.z = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].z - u[threadIdx.x+1][threadIdx.y].z); normValue.x = sqrt(xValue.x*xValue.x + yValue.x*yValue.x); normValue.y = sqrt(xValue.y*xValue.y + yValue.y*yValue.y); normValue.z = sqrt(xValue.z*xValue.z + yValue.z*yValue.z); *((float3*) (((char*) outputImage) + y * iPitchBytes) + x) = normValue ; } } void gpu_derivative_sm_d(const float *inputImage, float *outputImage, int iWidth, int iHeight, int iSpectrum, int mode) { size_t iPitchBytes; float *inputImage_d = 0, *outputImage_d = 0; dim3 blockSize(BW, BH); dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) ); //dim3 smSize(BW+2,BH); if(iSpectrum == 1) { cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) ); cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) ); cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, cudaMemcpyHostToDevice) ); if (mode == 0) derivativeX_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 1) derivativeY_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 2) gradient_magnitude_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes); cutilSafeCall( cudaThreadSynchronize() ); cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, cudaMemcpyDeviceToHost) ); } else 
if(iSpectrum == 3) { cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) ); cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) ); cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, cudaMemcpyHostToDevice) ); if (mode == 0) derivativeX_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 1) derivativeY_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes); else if (mode == 2) gradient_magnitude_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes); cutilSafeCall( cudaThreadSynchronize() ); cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, cudaMemcpyDeviceToHost) ); } cutilSafeCall( cudaFree(inputImage_d) ); cutilSafeCall( cudaFree(outputImage_d) ); }
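/*
 * Illustrative usage sketch, not part of the original file: how a caller might
 * invoke gpu_derivative_sm_d() above. Buffer names are hypothetical; images are
 * assumed to be row-major host arrays with iSpectrum interleaved floats per pixel.
 * mode 0/1/2 selects the X derivative, the Y derivative, or the gradient
 * magnitude, matching the branches inside the function.
 */
#if 0   /* example only -- excluded from the build */
void example_calls(float *grayImage, float *grayResult,
                   float *rgbImage, float *rgbResult, int width, int height)
{
  /* single-channel image: iSpectrum == 1, mode 2 = gradient magnitude */
  gpu_derivative_sm_d(grayImage, grayResult, width, height, 1, 2);
  /* interleaved RGB image passed as float*: iSpectrum == 3, mode 0 = X derivative */
  gpu_derivative_sm_d(rgbImage, rgbResult, width, height, 3, 0);
}
#endif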
66690f9215c049a47051ff48ad7ff57702d6469f.hip
// !!! This is a file automatically generated by hipify!!! /* * material_normal.cu - hit programs for normal materials on GPUs. */ #include "accelerad_copyright.h" #include "otypes.h" /* For definition of MAT_METAL */ #include <optix.h> #include <optixu/optixu_math_namespace.h> #include "optix_shader_ray.h" #ifdef CONTRIB_DOUBLE #include "optix_double.h" #endif using namespace optix; #define AMBIENT #define TRANSMISSION #ifndef MAXITER #define MAXITER 10 /* maximum # specular ray attempts */ #endif #define MAXSPART 64 /* maximum partitions per source */ //#define frandom() (rnd( prd.seed )/float(RAND_MAX)) //#define frandom() (rnd( prd.seed )) /* specularity flags */ #define SP_REFL 01 /* has reflected specular component */ #define SP_TRAN 02 /* has transmitted specular */ #define SP_PURE 04 /* purely specular (zero roughness) */ #define SP_FLAT 010 /* flat reflecting surface */ #define SP_RBLT 020 /* reflection below sample threshold */ #define SP_TBLT 040 /* transmission below threshold */ typedef struct { unsigned int specfl; /* specularity flags, defined above */ float3 mcolor; /* color of this material */ float3 scolor; /* color of specular component */ //float3 vrefl; /* vector in direction of reflected ray */ float3 prdir; /* vector in transmitted direction */ float3 normal; float3 hit; float alpha2; /* roughness squared */ float rdiff, rspec; /* reflected specular, diffuse */ float trans; /* transmissivity */ float tdiff, tspec; /* transmitted specular, diffuse */ float3 pnorm; /* perturbed surface normal */ float pdot; /* perturbed dot product */ } NORMDAT; /* normal material data */ /* Context variables */ rtDeclareVariable(rtObject, top_object, , ); rtDeclareVariable(rtObject, top_ambient, , ); rtDeclareVariable(float, specthresh, , ); /* This is the minimum fraction of reflection or transmission, under which no specular sampling is performed */ rtDeclareVariable(float, specjitter, , ); /* specular sampling (ss) */ #ifdef LIGHTS rtDeclareVariable(float, dstrsrc, , ); /* direct jitter (dj) */ rtDeclareVariable(float, srcsizerat, , ); /* direct sampling ratio (ds) */ //rtDeclareVariable(float, shadthresh, , ); /* direct threshold (dt) */ //rtDeclareVariable(float, shadcert, , ); /* direct certainty (dc) */ //rtDeclareVariable(int, directrelay, , ); /* direct relays for secondary sources (dr) */ //rtDeclareVariable(int, vspretest, , ); /* direct presampling density for secondary sources (dp) */ #endif /* LIGHTS */ #ifdef AMBIENT rtDeclareVariable(float3, ambval, , ); /* This is the final value used in place of an indirect light calculation */ rtDeclareVariable(int, ambvwt, , ); /* As new indirect irradiances are computed, they will modify the default ambient value in a moving average, with the specified weight assigned to the initial value given on the command and all other weights set to 1 */ rtDeclareVariable(int, ambounce, , ); /* Ambient bounces (ab) */ //rtDeclareVariable(int, ambres, , ); /* Ambient resolution (ar) */ rtDeclareVariable(float, ambacc, , ); /* Ambient accuracy (aa). 
This value will approximately equal the error from indirect illuminance interpolation */ rtDeclareVariable(int, ambdiv, , ); /* Ambient divisions (ad) */ rtDeclareVariable(int, ambdiv_final, , ); /* Number of ambient divisions for final-pass fill (ag) */ //rtDeclareVariable(int, ambssamp, , ); /* Ambient super-samples (as) */ rtDeclareVariable(float, avsum, , ); /* computed ambient value sum (log) */ rtDeclareVariable(unsigned int, navsum, , ); /* number of values in avsum */ #endif /* AMBIENT */ rtDeclareVariable(float, exposure, , ) = 0.0f; /* Current exposure (-pe), zero unless called from rvu */ rtBuffer<DistantLight> lights; /* Geometry instance variables */ #ifdef LIGHTS rtBuffer<float3> vertex_buffer; rtBuffer<uint3> lindex_buffer; // position indices #endif RT_METHOD float3 dirnorm(Ray *shadow_ray, PerRayData_shadow *shadow_prd, const NORMDAT *nd, const float& omega, const float3& ray_dir, PerRayData_radiance &prd); RT_METHOD float3 gaussamp(const NORMDAT *nd, const float3& ray_dir, PerRayData_radiance &prd); #ifdef AMBIENT RT_METHOD float3 multambient(float3 aval, const float3& normal, const float3& pnormal, const float3& hit, const unsigned int& ambincl, PerRayData_radiance &prd); #ifdef DAYSIM_COMPATIBLE RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd, DaysimCoef dc); #else RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd); #endif //RT_METHOD int ambsample( AMBHEMI *hp, const int& i, const int& j, const float3 normal, const float3 hit ); #endif /* AMBIENT */ #ifdef LIGHTS RT_METHOD unsigned int flatpart( const float3& v, const float3& r0, const float3& r1, const float3& r2, const float& weight ); RT_METHOD float solid_angle( const float3& r0, const float3& r1, const float3& r2 ); RT_METHOD float3 barycentric( float2& lambda, const float3& r0, const float3& r1, const float3& r2, const int flip ); #endif /* LIGHTS */ //RT_METHOD float2 multisamp2(float r); //RT_METHOD int ilhash(int3 d); RT_CALLABLE_PROGRAM PerRayData_shadow closest_hit_normal_shadow(IntersectData const&data, PerRayData_shadow prd_shadow) { NORMDAT nd; /* check for back side */ nd.pnorm = faceforward(data.world_shading_normal, -data.ray_direction, data.world_geometric_normal); nd.normal = faceforward(data.world_geometric_normal, -data.ray_direction, data.world_geometric_normal); nd.hit = data.hit; nd.mcolor = data.mat.color; nd.rspec = data.mat.params.n.spec; nd.alpha2 = data.mat.params.n.rough * data.mat.params.n.rough; nd.specfl = 0u; /* specularity flags */ #ifdef TRANSMISSION if (data.mat.params.n.trans > 0.0f) { // type == MAT_TRANS /* get roughness */ if (nd.alpha2 <= FTINY) { nd.specfl |= SP_PURE; // label this as a purely specular reflection } /* perturb normal */ float3 pert = nd.normal - nd.pnorm; int hastexture = dot(pert, pert) > FTINY * FTINY; nd.pdot = -dot(data.ray_direction, nd.pnorm); if (nd.pdot < 0.0f) { /* fix orientation from raynormal in raytrace.c */ nd.pnorm += 2.0f * nd.pdot * data.ray_direction; nd.pdot = -nd.pdot; } if (nd.pdot < 0.001f) nd.pdot = 0.001f; /* non-zero for dirnorm() */ // if it's a face or a ring label as flat (currently we only support triangles, so everything is flat) nd.specfl |= SP_FLAT; /* modify material color */ //nd.mcolor *= rtTex3D(rtTextureId id, texcoord.x, texcoord.y, texcoord.z).xyz; /* compute Fresnel approx. 
*/ float fest = 0.0f; if (nd.specfl & SP_PURE && nd.rspec >= FRESTHRESH) { fest = FRESNE(nd.pdot); nd.rspec += fest * (1.0f - nd.rspec); } /* compute transmission */ nd.prdir = data.ray_direction; nd.trans = data.mat.params.n.trans * (1.0f - nd.rspec); nd.tspec = nd.trans * data.mat.params.n.tspec; if (nd.tspec > FTINY) { nd.specfl |= SP_TRAN; /* check threshold */ if (!(nd.specfl & SP_PURE) && specthresh >= nd.tspec - FTINY) nd.specfl |= SP_TBLT; if (hastexture) { //TODO only if ambient depth == 0 if (dot(nd.prdir - pert, nd.normal) < -FTINY) nd.prdir = normalize(nd.prdir - pert); /* OK */ } } } /* transmitted ray */ if ((nd.specfl&(SP_TRAN | SP_PURE | SP_TBLT)) == (SP_TRAN | SP_PURE)) { #ifdef CONTRIB prd_shadow.rcoef *= nd.mcolor * nd.tspec; #endif Ray trans_ray = make_Ray(nd.hit, nd.prdir, SHADOW_RAY, ray_start(nd.hit, nd.prdir, nd.normal, RAY_START), RAY_END); rtTrace(top_object, trans_ray, prd_shadow); prd_shadow.result *= nd.mcolor * nd.tspec; #ifdef DAYSIM_COMPATIBLE daysimScale(prd_shadow.dc, nd.mcolor.x * nd.tspec); #endif } #endif /* TRANSMISSION */ return prd_shadow; } RT_CALLABLE_PROGRAM PerRayData_radiance closest_hit_normal_radiance(IntersectData const&data, PerRayData_radiance prd) { NORMDAT nd; /* check for back side */ nd.pnorm = faceforward(data.world_shading_normal, -data.ray_direction, data.world_geometric_normal); nd.normal = faceforward(data.world_geometric_normal, -data.ray_direction, data.world_geometric_normal); nd.hit = data.hit; PerRayData_radiance new_prd; float3 result = prd.mirror = make_float3(0.0f); nd.mcolor = data.mat.color; nd.scolor = make_float3(0.0f); nd.rspec = data.mat.params.n.spec; nd.alpha2 = data.mat.params.n.rough * data.mat.params.n.rough; nd.specfl = 0u; /* specularity flags */ /* get roughness */ if (nd.alpha2 <= FTINY) { nd.specfl |= SP_PURE; // label this as a purely specular reflection } /* perturb normal */ float3 pert = nd.normal - nd.pnorm; int hastexture = dot(pert, pert) > FTINY * FTINY; nd.pdot = -dot(data.ray_direction, nd.pnorm); if (nd.pdot < 0.0f) { /* fix orientation from raynormal in raytrace.c */ nd.pnorm += 2.0f * nd.pdot * data.ray_direction; nd.pdot = -nd.pdot; } if (nd.pdot < 0.001f) nd.pdot = 0.001f; /* non-zero for dirnorm() */ // if it's a face or a ring label as flat (currently we only support triangles, so everything is flat) nd.specfl |= SP_FLAT; /* modify material color */ //nd.mcolor *= rtTex3D(rtTextureId id, texcoord.x, texcoord.y, texcoord.z).xyz; /* compute Fresnel approx. 
*/ float fest = 0.0f; if (nd.specfl & SP_PURE && nd.rspec >= FRESTHRESH) { fest = FRESNE(nd.pdot); nd.rspec += fest * (1.0f - nd.rspec); } /* compute transmission */ nd.tdiff = nd.tspec = nd.trans = 0.0f; // because it's opaque #ifdef TRANSMISSION nd.prdir = data.ray_direction; if (data.mat.params.n.trans > 0.0f) { // data.mat.type == MAT_TRANS nd.trans = data.mat.params.n.trans * (1.0f - nd.rspec); nd.tspec = nd.trans * data.mat.params.n.tspec; nd.tdiff = nd.trans - nd.tspec; if (nd.tspec > FTINY) { nd.specfl |= SP_TRAN; /* check threshold */ if (!(nd.specfl & SP_PURE) && specthresh >= nd.tspec - FTINY) nd.specfl |= SP_TBLT; if (!prd.ambient_depth && hastexture) { if (dot(nd.prdir - pert, nd.normal) < -FTINY) nd.prdir = normalize(nd.prdir - pert); /* OK */ } } } /* diffuse reflection */ nd.rdiff = 1.0f - nd.trans - nd.rspec; /* transmitted ray */ if ((nd.specfl&(SP_TRAN | SP_PURE | SP_TBLT)) == (SP_TRAN | SP_PURE) && rayorigin(new_prd, prd, nd.mcolor * nd.tspec, 0, 0)) { #ifdef DAYSIM_COMPATIBLE new_prd.dc = daysimNext(prd.dc); #endif setupPayload(new_prd); Ray trans_ray = make_Ray(nd.hit, nd.prdir, RADIANCE_RAY, ray_start(nd.hit, nd.prdir, nd.normal, RAY_START), new_prd.tmax); rtTrace(top_object, trans_ray, new_prd); new_prd.result *= nd.mcolor * nd.tspec; result += new_prd.result; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, new_prd.dc, nd.mcolor.x * nd.tspec); #endif if (nd.tspec >= 1.0f - FTINY) { /* completely transparent */ prd.mirror = new_prd.mirror * nd.mcolor * nd.tspec; prd.mirror_distance = data.t + new_prd.mirror_distance; prd.distance = data.t + new_prd.distance; } else if (nd.tspec > nd.tdiff + nd.rdiff) prd.distance = data.t + rayDistance(new_prd); resolvePayload(prd, new_prd); } #endif // return if it's a shadow ray, which it isn't /* get specular reflection */ if (nd.rspec > FTINY) { nd.specfl |= SP_REFL; /* compute specular color */ if (data.mat.type != MAT_METAL) { nd.scolor = make_float3(nd.rspec); } else { if (fest > FTINY) { float d = data.mat.params.n.spec * (1.0f - fest); nd.scolor = fest + nd.mcolor * d; } else { nd.scolor = nd.mcolor * nd.rspec; } } /* check threshold */ if (!(nd.specfl & SP_PURE) && specthresh >= nd.rspec - FTINY) { nd.specfl |= SP_RBLT; } } /* reflected ray */ if ((nd.specfl&(SP_REFL | SP_PURE | SP_RBLT)) == (SP_REFL | SP_PURE) && rayorigin(new_prd, prd, nd.scolor, 1, 0)) { #ifdef DAYSIM_COMPATIBLE new_prd.dc = daysimNext(prd.dc); #endif setupPayload(new_prd); float3 vrefl = reflect(data.ray_direction, nd.pnorm); Ray refl_ray = make_Ray(nd.hit, vrefl, RADIANCE_RAY, ray_start(nd.hit, vrefl, nd.normal, RAY_START), new_prd.tmax); rtTrace(top_object, refl_ray, new_prd); new_prd.result *= nd.scolor; prd.mirror = new_prd.result; result += new_prd.result; prd.mirror_distance = data.t; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, new_prd.dc, nd.scolor.x); #endif if (nd.specfl & SP_FLAT && (prd.ambient_depth || !hastexture)) prd.mirror_distance += rayDistance(new_prd); resolvePayload(prd, new_prd); } if (!(nd.specfl & SP_PURE && nd.rdiff <= FTINY && nd.tdiff <= FTINY)) { /* not 100% pure specular */ /* checks *BLT flags */ if (!(nd.specfl & SP_PURE)) result += gaussamp(&nd, data.ray_direction, prd); #ifdef AMBIENT /* ambient from this side */ if (nd.rdiff > FTINY) { float3 aval = nd.mcolor * nd.rdiff; /* modified by material color */ if (nd.specfl & SP_RBLT) /* add in specular as well? 
*/ aval += nd.scolor; result += multambient(aval, nd.normal, nd.pnorm, nd.hit, data.mat.params.n.ambincl, prd); /* add to returned color */ } #ifdef TRANSMISSION /* ambient from other side */ if (nd.tdiff > FTINY) { float3 aval = nd.mcolor; /* modified by material color */ if (nd.specfl & SP_TBLT) aval *= nd.trans; else aval *= nd.tdiff; result += multambient(aval, -nd.normal, -nd.pnorm, nd.hit, data.mat.params.n.ambincl, prd); /* add to returned color */ } #endif /* TRANSMISSION */ #endif /* AMBIENT */ /* add direct component */ // This is the call to direct() in source.c // Let's start at line 447, and not bother with sorting for now // compute direct lighting PerRayData_shadow shadow_prd; #ifdef DAYSIM_COMPATIBLE shadow_prd.dc = daysimNext(prd.dc); #endif Ray shadow_ray = make_Ray(nd.hit, nd.pnorm, SHADOW_RAY, RAY_START, RAY_END); /* contributions from distant lights (mainly the sun) */ unsigned int num_lights = lights.size(); for (unsigned int i = 0u; i < num_lights; i++) { const DistantLight light = lights[i]; if ( light.casts_shadow ) { shadow_prd.target = i; shadow_ray.direction = normalize( light.pos ); //TODO implement direct jitter for distant light sources shadow_ray.tmin = ray_start(nd.hit, shadow_ray.direction, nd.normal, RAY_START); shadow_ray.tmax = RAY_END; result += dirnorm(&shadow_ray, &shadow_prd, &nd, light.solid_angle, data.ray_direction, prd); } } #ifdef LIGHTS /* contributions from nearby lights */ num_lights = lindex_buffer.size(); for (unsigned int i = 0u; i < num_lights; i++) { const uint3 v_idx = lindex_buffer[i]; const float3 r0 = vertex_buffer[v_idx.x] - nd.hit; const float3 r1 = vertex_buffer[v_idx.y] - nd.hit; const float3 r2 = vertex_buffer[v_idx.z] - nd.hit; float3 rdir = ( r0 + r1 + r2 ) / 3.0f; const unsigned int divs = flatpart( rdir, r0, r1, r2, prd.weight ); //TODO divisions should be smaller closer to the light source const float step = 1.0f / divs; for ( int j = 0; j < divs; j++ ) for ( int k = 0; k < divs; k++ ) { float2 lambda = make_float2( step * j, step * k ); const float3 p0 = barycentric( lambda, r0, r1, r2, k + j >= divs ); lambda = make_float2( step * ( j + 1 ), step * k ); const float3 p1 = barycentric( lambda, r0, r1, r2, k + j >= divs ); lambda = make_float2( step * j, step * ( k + 1 ) ); const float3 p2 = barycentric( lambda, r0, r1, r2, k + j >= divs ); const float omega = solid_angle( p0, p1, p2 ); if ( omega > FTINY ) { /* from nextssamp in srcsamp.c */ rdir = ( p0 + p1 + p2 ) / 3.0f; if ( dstrsrc > FTINY ) { /* jitter sample using random barycentric coordinates */ lambda = make_float2( hiprand_uniform( prd.state ), hiprand_uniform( prd.state ) ); float3 vpos = barycentric( lambda, p0, p1, p2, lambda.x + lambda.y >= 1.0f ); rdir += dstrsrc * ( vpos - rdir ); } shadow_prd.target = -v_idx.x - 1; //TODO find a better way to identify surface shadow_ray.direction = normalize( rdir ); shadow_ray.tmin = ray_start(nd.hit, shadow_ray.direction, nd.normal, RAY_START); shadow_ray.tmax = length(rdir) * 1.0001f; result += dirnorm(&shadow_ray, &shadow_prd, &nd, omega, data.ray_direction, prd); } } } #endif /* LIGHTS */ } // pass the color back up the tree prd.result = result; return prd; } /* compute source contribution */ RT_METHOD float3 dirnorm(Ray *shadow_ray, PerRayData_shadow *shadow_prd, const NORMDAT *nd, const float& omega, const float3& ray_dir, PerRayData_radiance &prd) { float3 cval = make_float3( 0.0f ); float ldot = dot(nd->pnorm, shadow_ray->direction); #ifdef TRANSMISSION if (ldot < 0.0f ? 
nd->trans <= FTINY : nd->trans >= 1.0f - FTINY) #else if ( ldot <= FTINY ) #endif return cval; /* Fresnel estimate */ float lrdiff = nd->rdiff; float ltdiff = nd->tdiff; if (nd->specfl & SP_PURE && nd->rspec >= FRESTHRESH && (lrdiff > FTINY) | (ltdiff > FTINY)) { float dtmp = 1.0f - FRESNE(fabs(ldot)); lrdiff *= dtmp; ltdiff *= dtmp; } if (ldot > FTINY && lrdiff > FTINY) { /* * Compute and add diffuse reflected component to returned * color. The diffuse reflected component will always be * modified by the color of the material. */ float dtmp = ldot * omega * lrdiff * M_1_PIf; cval += nd->mcolor * dtmp; } #ifdef TRANSMISSION if (ldot < -FTINY && ltdiff > FTINY) { /* * Compute diffuse transmission. */ float dtmp = -ldot * omega * ltdiff * M_1_PIf; cval += nd->mcolor * dtmp; } #endif if (ldot > FTINY && (nd->specfl&(SP_REFL | SP_PURE)) == SP_REFL) { /* * Compute specular reflection coefficient using * Gaussian distribution model. */ /* roughness */ float dtmp = nd->alpha2; /* + source if flat */ if (nd->specfl & SP_FLAT) dtmp += omega * 0.25f * M_1_PIf; /* half vector */ float3 vtmp = shadow_ray->direction - ray_dir; float d2 = dot(vtmp, nd->pnorm); d2 *= d2; float d3 = dot( vtmp, vtmp ); float d4 = (d3 - d2) / d2; /* new W-G-M-D model */ dtmp = expf(-d4/dtmp) * d3 / (M_PIf * d2*d2 * dtmp); /* worth using? */ if (dtmp > FTINY) { dtmp *= ldot * omega; cval += nd->scolor * dtmp; } } #ifdef TRANSMISSION if (ldot < -FTINY && (nd->specfl&(SP_TRAN | SP_PURE)) == SP_TRAN) { /* * Compute specular transmission. Specular transmission * is always modified by material color. */ /* roughness + source */ float dtmp = nd->alpha2 + omega * M_1_PIf; /* Gaussian */ dtmp = expf((2.0f * dot(nd->prdir, shadow_ray->direction) - 2.0f) / dtmp) / (M_PIf * dtmp); // may need to perturb direction /* worth using? */ if (dtmp > FTINY) { dtmp *= nd->tspec * omega * sqrtf(-ldot / nd->pdot); cval += nd->mcolor * dtmp; } } #endif /* from direct() in source.c */ if (fmaxf(cval) <= 0.0f) return cval; // cast shadow ray shadow_prd->result = make_float3(0.0f); #ifdef CONTRIB shadow_prd->rcoef = prd.rcoef * cval; #endif #ifdef ANTIMATTER shadow_prd->mask = prd.mask; shadow_prd->inside = prd.inside; #endif #ifdef DAYSIM_COMPATIBLE daysimSet(shadow_prd->dc, 0.0f); #endif rtTrace(top_object, *shadow_ray, *shadow_prd); #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, shadow_prd->dc, cval.x); #endif return cval * shadow_prd->result; } // sample Gaussian specular RT_METHOD float3 gaussamp(const NORMDAT *nd, const float3& ray_dir, PerRayData_radiance &prd) { float3 rcol = make_float3( 0.0f ); /* This section is based on the gaussamp method in normal.c */ if ((nd->specfl & (SP_REFL | SP_RBLT)) != SP_REFL && (nd->specfl & (SP_TRAN | SP_TBLT)) != SP_TRAN) return rcol; PerRayData_radiance gaus_prd; Ray gaus_ray = make_Ray(nd->hit, nd->pnorm, RADIANCE_RAY, RAY_START, RAY_END); float d; /* set up sample coordinates */ float3 u = getperpendicular(nd->pnorm); // prd.state? float3 v = cross(nd->pnorm, u); unsigned int nstarget, nstaken, ntrials; /* compute reflection */ if ((nd->specfl & (SP_REFL | SP_RBLT)) == SP_REFL && rayorigin(gaus_prd, prd, nd->scolor, 1, 1)) { //TODO the ambient depth increment is a hack to prevent the sun from affecting specular values float3 scolor = nd->scolor; nstarget = 1; if (specjitter > 1.5f) { /* multiple samples? 
*/ // By default it's 1.0 nstarget = specjitter * prd.weight + 0.5f; if ( gaus_prd.weight <= minweight * nstarget ) nstarget = gaus_prd.weight / minweight; if ( nstarget > 1 ) { d = 1.0f / nstarget; scolor *= d; //scolor, stored as ray rcoef #ifdef CONTRIB gaus_prd.rcoef *= d; #endif gaus_prd.weight *= d; // TODO make sure weight isn't changed by hit programs } else nstarget = 1; } float3 scol = make_float3( 0.0f ); #ifdef DAYSIM_COMPATIBLE DaysimCoef dc = daysimNext(prd.dc); if (nstarget > 1) { daysimSet(dc, 0.0f); gaus_prd.dc = daysimNext(dc); } else gaus_prd.dc = dc; #endif //dimlist[ndims++] = (int)(size_t)np->mp; unsigned int maxiter = MAXITER * nstarget; for (nstaken = ntrials = 0; nstaken < nstarget && ntrials < maxiter; ntrials++) { float2 rv = make_float2( hiprand_uniform( prd.state ), hiprand_uniform( prd.state ) ); // should be evenly distributed in both dimensions d = 2.0f * M_PIf * rv.x; float cosp = cosf( d ); float sinp = sinf( d ); if ( ( 0.0f <= specjitter ) && ( specjitter < 1.0f ) ) rv.y = 1.0f - specjitter * rv.y; if ( rv.y <= FTINY ) d = 1.0f; else d = sqrtf(nd->alpha2 * -logf(rv.y)); float3 h = nd->pnorm + d * (cosp * u + sinp * v); d = -2.0f * dot( h, ray_dir ) / ( 1.0f + d*d ); gaus_ray.direction = ray_dir + h * d; /* sample rejection test */ if ((d = dot(gaus_ray.direction, nd->normal)) <= FTINY) continue; gaus_ray.direction = normalize( gaus_ray.direction ); gaus_ray.tmin = ray_start(nd->hit, gaus_ray.direction, nd->normal, RAY_START); gaus_ray.tmax = gaus_prd.tmax; setupPayload(gaus_prd); //if (nstaken) // check for prd data that needs to be cleared rtTrace(top_object, gaus_ray, gaus_prd); resolvePayload(prd, gaus_prd); /* W-G-M-D adjustment */ if (nstarget > 1) { d = 2.0f / (1.0f - dot(ray_dir, nd->normal) / d); scol += gaus_prd.result * d; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(dc, gaus_prd.dc, d); #endif } else { rcol += gaus_prd.result * scolor; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, gaus_prd.dc, scolor.x); #endif } ++nstaken; } /* final W-G-M-D weighting */ if (nstarget > 1) { scol *= scolor; d = (float)nstarget / ntrials; rcol += scol * d; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, dc, scolor.x * d); #endif } //ndims--; } #ifdef TRANSMISSION /* compute transmission */ float3 mcolor = nd->mcolor * nd->tspec; /* modified by color */ if ((nd->specfl & (SP_TRAN | SP_TBLT)) == SP_TRAN && rayorigin(gaus_prd, prd, mcolor, 0, 0)) { nstarget = 1; if (specjitter > 1.5f) { /* multiple samples? 
*/ // By default it's 1.0 nstarget = specjitter * prd.weight + 0.5f; if ( gaus_prd.weight <= minweight * nstarget ) nstarget = gaus_prd.weight / minweight; if ( nstarget > 1 ) { d = 1.0f / nstarget; mcolor *= d; //mcolor, stored as ray rcoef #ifdef CONTRIB gaus_prd.rcoef *= d; #endif gaus_prd.weight *= d; // TODO make sure weight isn't changed by hit programs } else nstarget = 1; } //dimlist[ndims++] = (int)(size_t)np->mp; unsigned int maxiter = MAXITER * nstarget; for (nstaken = ntrials = 0; nstaken < nstarget && ntrials < maxiter; ntrials++) { float2 rv = make_float2( hiprand_uniform( prd.state ), hiprand_uniform( prd.state ) ); // should be evenly distributed in both dimensions d = 2.0f * M_PIf * rv.x; float cosp = cosf( d ); float sinp = sinf( d ); if ( ( 0.0f <= specjitter ) && ( specjitter < 1.0f ) ) rv.y = 1.0f - specjitter * rv.y; if ( rv.y <= FTINY ) d = 1.0f; else d = sqrtf(nd->alpha2 * -logf(rv.y)); gaus_ray.direction = nd->prdir + d * (cosp * u + sinp * v); // ray direction is perturbed /* sample rejection test */ if (dot(gaus_ray.direction, nd->normal) >= -FTINY) continue; gaus_ray.direction = normalize( gaus_ray.direction ); gaus_ray.tmin = ray_start(nd->hit, gaus_ray.direction, nd->normal, RAY_START); gaus_ray.tmax = gaus_prd.tmax; #ifdef DAYSIM_COMPATIBLE gaus_prd.dc = daysimNext(prd.dc); #endif setupPayload(gaus_prd); //if (nstaken) // check for prd data that needs to be cleared rtTrace(top_object, gaus_ray, gaus_prd); resolvePayload(prd, gaus_prd); rcol += gaus_prd.result * mcolor; ++nstaken; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, gaus_prd.dc, mcolor.x); #endif } //ndims--; } #endif //return make_float3(0.0f); return rcol; } #ifdef AMBIENT // Compute the ambient component and multiply by the coefficient. RT_METHOD float3 multambient(float3 aval, const float3& normal, const float3& pnormal, const float3& hit, const unsigned int& ambincl, PerRayData_radiance &prd) { if (exposure && !prd.ambient_depth) // TODO exposure is hack to check if we are running rvu return make_float3(0.0f); int do_ambient = 1; float d; if (ambdiv <= 0) /* no ambient calculation */ goto dumbamb; /* check number of bounces */ if (prd.ambient_depth >= ambounce) goto dumbamb; /* check ambient list */ if (!ambincl) goto dumbamb; if (ambacc > FTINY && navsum != 0) { /* ambient storage */ //if (tracktime) /* sort to minimize thrashing */ // sortambvals(0); /* interpolate ambient value */ //acol = make_float3( 0.0f ); //d = sumambient(acol, r, normal, rdepth, &atrunk, thescene.cuorg, thescene.cusize); PerRayData_ambient ambient_prd; ambient_prd.result = make_float3( 0.0f ); ambient_prd.surface_point = hit; ambient_prd.surface_normal = pnormal; ambient_prd.ambient_depth = prd.ambient_depth; ambient_prd.wsum = 0.0f; ambient_prd.weight = prd.weight; #ifdef DAYSIM_COMPATIBLE ambient_prd.dc = daysimNext(prd.dc); daysimSet(ambient_prd.dc, 0.0f); #endif #ifdef HIT_COUNT ambient_prd.hit_count = 0; #endif const float tmax = ray_start(hit, AMBIENT_RAY_LENGTH); Ray ambient_ray = make_Ray(hit - normal * tmax, normal, AMBIENT_RAY, 0.0f, 2.0f * tmax); rtTrace(top_ambient, ambient_ray, ambient_prd, RT_VISIBILITY_ALL, RT_RAY_FLAG_DISABLE_CLOSESTHIT); #ifdef HIT_COUNT prd.hit_count += ambient_prd.hit_count; #endif if (ambient_prd.wsum > FTINY) { // TODO if miss program is called, set wsum = 1.0f or place this before ambacc == 0.0f ambient_prd.result *= 1.0f / ambient_prd.wsum; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, ambient_prd.dc, aval.x / ambient_prd.wsum); #endif return aval * ambient_prd.result; } 
//rdepth++; /* need to cache new value */ //d = makeambient(acol, r, normal, rdepth-1); //TODO implement as miss program for ambient ray //rdepth--; //if ( dot( ambient_prd.result, ambient_prd.result) > FTINY) { // quick check to see if a value was returned by miss program // return aval * ambient_prd.result; /* got new value */ //} do_ambient = !prd.ambient_depth && ambdiv_final; } if (do_ambient) { /* no ambient storage */ /* Option to show error if nothing found */ if (ambdiv_final < 0) rtThrow(RT_EXCEPTION_CUSTOM - ambdiv_final); float3 acol = aval; #ifdef DAYSIM_COMPATIBLE DaysimCoef dc = daysimNext(prd.dc); daysimSet(dc, 0.0f); d = doambient(&acol, normal, pnormal, hit, prd, dc); if (d > FTINY) daysimAdd(prd.dc, dc); #else d = doambient(&acol, normal, pnormal, hit, prd); #endif if (d > FTINY) return acol; } dumbamb: /* return global value */ if ((ambvwt <= 0) || (navsum == 0)) { #ifdef DAYSIM_COMPATIBLE daysimAdd(prd.dc, aval.x * ambval.x); #endif return aval * ambval; } float l = bright(ambval); /* average in computations */ if (l > FTINY) { d = (logf(l)*(float)ambvwt + avsum) / (float)(ambvwt + navsum); d = expf(d) / l; aval *= ambval; /* apply color of ambval */ #ifdef DAYSIM_COMPATIBLE daysimAdd(prd.dc, aval.x * ambval.x * d); #endif } else { d = expf( avsum / (float)navsum ); #ifdef DAYSIM_COMPATIBLE daysimAdd(prd.dc, aval.x * d); #endif } return aval * d; } /* sample indirect hemisphere, based on samp_hemi in ambcomp.c */ #ifdef DAYSIM_COMPATIBLE RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd, DaysimCoef dc) #else RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd) #endif { float d; float wt = prd.weight; /* set number of divisions */ if (wt > (d = 0.8f * fmaxf(*rcol) * wt / (ambdiv_final * minweight))) // Ignore ambacc <= FTINY check because this is faking ambacc == 0 calc wt = d; /* avoid ray termination */ int n = sqrtf(ambdiv_final * wt) + 0.5f; int i = 1 + 5 * (ambacc > FTINY); /* minimum number of samples */ if (n < i) n = i; const int nn = n * n; float3 acol = make_float3( 0.0f ); unsigned int sampOK = 0u; /* assign coefficient */ float3 acoef = *rcol / nn; /* Setup from ambsample in ambcomp.c */ PerRayData_radiance new_prd; #ifdef DAYSIM_COMPATIBLE new_prd.dc = daysimNext(dc); #endif Ray amb_ray = make_Ray( hit, pnormal, RADIANCE_RAY, RAY_START, RAY_END ); // Use normal point as temporary direction /* End ambsample setup */ /* make tangent plane axes */ float3 ux = getperpendicular( pnormal, prd.state ); float3 uy = cross( pnormal, ux ); /* sample divisions */ for (i = n; i--; ) for (int j = n; j--; ) { if (!rayorigin(new_prd, prd, acoef, 1, 1)) continue; //hp.sampOK += ambsample( &hp, i, j, normal, hit ); /* ambsample in ambcomp.c */ float2 spt = make_float2(hiprand_uniform(prd.state), hiprand_uniform(prd.state)); if (i > 0 && i < n - 1 && j > 0 && j < n - 1) /* avoid coincident samples */ spt = 0.1f + 0.8f * spt; SDsquare2disk( spt, (j+spt.y) / n, (i+spt.x) / n ); float zd = sqrtf( 1.0f - dot( spt, spt ) ); amb_ray.direction = normalize( spt.x*ux + spt.y*uy + zd*pnormal ); if (dot(amb_ray.direction, normal) <= 0) /* Prevent light leaks */ continue; amb_ray.tmin = ray_start( hit, amb_ray.direction, normal, RAY_START ); amb_ray.tmax = new_prd.tmax; //dimlist[ndims++] = AI(hp,i,j) + 90171; setupPayload(new_prd); //Ray amb_ray = make_Ray( hit, rdir, RADIANCE_RAY, RAY_START, new_prd.tmax ); rtTrace(top_object, amb_ray, 
new_prd); resolvePayload(prd, new_prd); //ndims--; if ( isnan( new_prd.result ) ) // TODO How does this happen? continue; if ( new_prd.distance <= FTINY ) continue; /* should never happen */ acol += new_prd.result * acoef; /* add to our sum */ #ifdef DAYSIM_COMPATIBLE daysimAddScaled(dc, new_prd.dc, acoef.x); #endif sampOK++; } *rcol = acol; if ( !sampOK ) { /* utter failure? */ return( 0 ); } if ( sampOK < nn ) { //hp.sampOK *= -1; /* soft failure */ return( 1 ); } //n = ambssamp * wt + 0.5f; //if (n > 8) { /* perform super-sampling? */ // ambsupersamp(hp, n); // *rcol = hp.acol; //} return( 1 ); /* all is well */ } #endif /* AMBIENT */ #ifdef LIGHTS /* partition a flat source */ RT_METHOD unsigned int flatpart( const float3& v, const float3& r0, const float3& r1, const float3& r2, const float& weight ) { //float3 vp = source[si->sn].snorm; //if ( dot( v, vp ) <= 0.0f ) /* behind source */ // return 0u; if ( srcsizerat <= FTINY ) return 1u; float d; /* Find longest edge */ float3 vp = r1 - r0; float d2 = dot( vp, vp ); vp = r2 - r1; if ( ( d = dot( vp, vp ) ) > d2 ) d2 = d; vp = r2 - r0; if ( ( d = dot( vp, vp ) ) > d2 ) d2 = d; /* Find minimum partition size */ d = srcsizerat / weight; d *= d * dot( v, v ); /* Find number of partions */ d2 /= d; if ( d2 < 1.0f ) return 1u; if ( d2 > ( d = MAXSPART >> 1 ) ) // Divide maximum partitions by two going from rectangle to triangle d2 = d; return (unsigned int)sqrtf( d2 ); } /* Solid angle calculation from "The solid angle of a plane triangle", A van Oosterom and J Strackee */ RT_METHOD float solid_angle( const float3& r0, const float3& r1, const float3& r2 ) { const float l0 = length( r0 ); const float l1 = length( r1 ); const float l2 = length( r2 ); const float numerator = dot( r0, cross( r1, r2 ) ); const float denominator = l0 * l1 * l2 + dot( r0, r1 ) * l2 + dot( r0, r2 ) * l1 + dot( r1, r2 ) * l0; return 2.0f * fabsf( atan2( numerator, denominator ) ); } /* Compute point from barycentric coordinates and flip if outside triangle */ RT_METHOD float3 barycentric( float2& lambda, const float3& r0, const float3& r1, const float3& r2, const int flip ) { if ( flip ) lambda = 1.0f - lambda; return r0 * ( 1.0f - lambda.x - lambda.y ) + r1 * lambda.x + r2 * lambda.y; } #endif /* LIGHTS */ /* convert 1-dimensional sample to 2 dimensions, based on multisamp.c */ //RT_METHOD float2 multisamp2(float r) /* 1-dimensional sample [0,1) */ //{ // int j; // register int k; // int2 ti; // float s; // // ti = make_int2( 0 ); // j = 8; // while (j--) { // k = s = r*(1<<2); // r = s - k; // ti += ti + make_int2( ((k>>2) & 1), ((k>>1) & 1) ); // } // ti += make_int2( frandom() ); // ti *= 1.0f/256.0f; //} /* hash a set of integer values */ //RT_METHOD int ilhash(int3 d) //{ // register int hval; // // hval = 0; // hval ^= d.x * 73771; // hval ^= d.y * 96289; // hval ^= d.z * 103699; // return(hval & 0x7fffffff); //}
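/*
 * Illustrative note, not part of the original file: solid_angle() above follows the
 * Van Oosterom-Strackee formula for the solid angle of a plane triangle,
 *   Omega = 2 * | atan2( r0 . (r1 x r2),
 *                        |r0||r1||r2| + (r0.r1)|r2| + (r0.r2)|r1| + (r1.r2)|r0| ) |.
 * The scalar re-statement below is a host-side sketch (names and layout are
 * hypothetical, math.h assumed reachable through the existing includes) that makes
 * the formula easy to sanity-check: for r0=+X, r1=+Y, r2=+Z the triangle spans one
 * octant of the unit sphere, so it returns 2 * atan2(1, 1) = pi/2 = 4*pi/8.
 */
static float solid_angle_scalar(const float r0[3], const float r1[3], const float r2[3])
{
  const float l0 = sqrtf(r0[0]*r0[0] + r0[1]*r0[1] + r0[2]*r0[2]);
  const float l1 = sqrtf(r1[0]*r1[0] + r1[1]*r1[1] + r1[2]*r1[2]);
  const float l2 = sqrtf(r2[0]*r2[0] + r2[1]*r2[1] + r2[2]*r2[2]);
  const float triple = r0[0]*(r1[1]*r2[2] - r1[2]*r2[1])      /* r0 . (r1 x r2) */
                     + r0[1]*(r1[2]*r2[0] - r1[0]*r2[2])
                     + r0[2]*(r1[0]*r2[1] - r1[1]*r2[0]);
  const float denom = l0*l1*l2
                    + (r0[0]*r1[0] + r0[1]*r1[1] + r0[2]*r1[2]) * l2
                    + (r0[0]*r2[0] + r0[1]*r2[1] + r0[2]*r2[2]) * l1
                    + (r1[0]*r2[0] + r1[1]*r2[1] + r1[2]*r2[2]) * l0;
  return 2.0f * fabsf(atan2f(triple, denom));
}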
66690f9215c049a47051ff48ad7ff57702d6469f.cu
/* * material_normal.cu - hit programs for normal materials on GPUs. */ #include "accelerad_copyright.h" #include "otypes.h" /* For definition of MAT_METAL */ #include <optix.h> #include <optixu/optixu_math_namespace.h> #include "optix_shader_ray.h" #ifdef CONTRIB_DOUBLE #include "optix_double.h" #endif using namespace optix; #define AMBIENT #define TRANSMISSION #ifndef MAXITER #define MAXITER 10 /* maximum # specular ray attempts */ #endif #define MAXSPART 64 /* maximum partitions per source */ //#define frandom() (rnd( prd.seed )/float(RAND_MAX)) //#define frandom() (rnd( prd.seed )) /* specularity flags */ #define SP_REFL 01 /* has reflected specular component */ #define SP_TRAN 02 /* has transmitted specular */ #define SP_PURE 04 /* purely specular (zero roughness) */ #define SP_FLAT 010 /* flat reflecting surface */ #define SP_RBLT 020 /* reflection below sample threshold */ #define SP_TBLT 040 /* transmission below threshold */ typedef struct { unsigned int specfl; /* specularity flags, defined above */ float3 mcolor; /* color of this material */ float3 scolor; /* color of specular component */ //float3 vrefl; /* vector in direction of reflected ray */ float3 prdir; /* vector in transmitted direction */ float3 normal; float3 hit; float alpha2; /* roughness squared */ float rdiff, rspec; /* reflected specular, diffuse */ float trans; /* transmissivity */ float tdiff, tspec; /* transmitted specular, diffuse */ float3 pnorm; /* perturbed surface normal */ float pdot; /* perturbed dot product */ } NORMDAT; /* normal material data */ /* Context variables */ rtDeclareVariable(rtObject, top_object, , ); rtDeclareVariable(rtObject, top_ambient, , ); rtDeclareVariable(float, specthresh, , ); /* This is the minimum fraction of reflection or transmission, under which no specular sampling is performed */ rtDeclareVariable(float, specjitter, , ); /* specular sampling (ss) */ #ifdef LIGHTS rtDeclareVariable(float, dstrsrc, , ); /* direct jitter (dj) */ rtDeclareVariable(float, srcsizerat, , ); /* direct sampling ratio (ds) */ //rtDeclareVariable(float, shadthresh, , ); /* direct threshold (dt) */ //rtDeclareVariable(float, shadcert, , ); /* direct certainty (dc) */ //rtDeclareVariable(int, directrelay, , ); /* direct relays for secondary sources (dr) */ //rtDeclareVariable(int, vspretest, , ); /* direct presampling density for secondary sources (dp) */ #endif /* LIGHTS */ #ifdef AMBIENT rtDeclareVariable(float3, ambval, , ); /* This is the final value used in place of an indirect light calculation */ rtDeclareVariable(int, ambvwt, , ); /* As new indirect irradiances are computed, they will modify the default ambient value in a moving average, with the specified weight assigned to the initial value given on the command and all other weights set to 1 */ rtDeclareVariable(int, ambounce, , ); /* Ambient bounces (ab) */ //rtDeclareVariable(int, ambres, , ); /* Ambient resolution (ar) */ rtDeclareVariable(float, ambacc, , ); /* Ambient accuracy (aa). 
This value will approximately equal the error from indirect illuminance interpolation */ rtDeclareVariable(int, ambdiv, , ); /* Ambient divisions (ad) */ rtDeclareVariable(int, ambdiv_final, , ); /* Number of ambient divisions for final-pass fill (ag) */ //rtDeclareVariable(int, ambssamp, , ); /* Ambient super-samples (as) */ rtDeclareVariable(float, avsum, , ); /* computed ambient value sum (log) */ rtDeclareVariable(unsigned int, navsum, , ); /* number of values in avsum */ #endif /* AMBIENT */ rtDeclareVariable(float, exposure, , ) = 0.0f; /* Current exposure (-pe), zero unless called from rvu */ rtBuffer<DistantLight> lights; /* Geometry instance variables */ #ifdef LIGHTS rtBuffer<float3> vertex_buffer; rtBuffer<uint3> lindex_buffer; // position indices #endif RT_METHOD float3 dirnorm(Ray *shadow_ray, PerRayData_shadow *shadow_prd, const NORMDAT *nd, const float& omega, const float3& ray_dir, PerRayData_radiance &prd); RT_METHOD float3 gaussamp(const NORMDAT *nd, const float3& ray_dir, PerRayData_radiance &prd); #ifdef AMBIENT RT_METHOD float3 multambient(float3 aval, const float3& normal, const float3& pnormal, const float3& hit, const unsigned int& ambincl, PerRayData_radiance &prd); #ifdef DAYSIM_COMPATIBLE RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd, DaysimCoef dc); #else RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd); #endif //RT_METHOD int ambsample( AMBHEMI *hp, const int& i, const int& j, const float3 normal, const float3 hit ); #endif /* AMBIENT */ #ifdef LIGHTS RT_METHOD unsigned int flatpart( const float3& v, const float3& r0, const float3& r1, const float3& r2, const float& weight ); RT_METHOD float solid_angle( const float3& r0, const float3& r1, const float3& r2 ); RT_METHOD float3 barycentric( float2& lambda, const float3& r0, const float3& r1, const float3& r2, const int flip ); #endif /* LIGHTS */ //RT_METHOD float2 multisamp2(float r); //RT_METHOD int ilhash(int3 d); RT_CALLABLE_PROGRAM PerRayData_shadow closest_hit_normal_shadow(IntersectData const&data, PerRayData_shadow prd_shadow) { NORMDAT nd; /* check for back side */ nd.pnorm = faceforward(data.world_shading_normal, -data.ray_direction, data.world_geometric_normal); nd.normal = faceforward(data.world_geometric_normal, -data.ray_direction, data.world_geometric_normal); nd.hit = data.hit; nd.mcolor = data.mat.color; nd.rspec = data.mat.params.n.spec; nd.alpha2 = data.mat.params.n.rough * data.mat.params.n.rough; nd.specfl = 0u; /* specularity flags */ #ifdef TRANSMISSION if (data.mat.params.n.trans > 0.0f) { // type == MAT_TRANS /* get roughness */ if (nd.alpha2 <= FTINY) { nd.specfl |= SP_PURE; // label this as a purely specular reflection } /* perturb normal */ float3 pert = nd.normal - nd.pnorm; int hastexture = dot(pert, pert) > FTINY * FTINY; nd.pdot = -dot(data.ray_direction, nd.pnorm); if (nd.pdot < 0.0f) { /* fix orientation from raynormal in raytrace.c */ nd.pnorm += 2.0f * nd.pdot * data.ray_direction; nd.pdot = -nd.pdot; } if (nd.pdot < 0.001f) nd.pdot = 0.001f; /* non-zero for dirnorm() */ // if it's a face or a ring label as flat (currently we only support triangles, so everything is flat) nd.specfl |= SP_FLAT; /* modify material color */ //nd.mcolor *= rtTex3D(rtTextureId id, texcoord.x, texcoord.y, texcoord.z).xyz; /* compute Fresnel approx. 
*/ float fest = 0.0f; if (nd.specfl & SP_PURE && nd.rspec >= FRESTHRESH) { fest = FRESNE(nd.pdot); nd.rspec += fest * (1.0f - nd.rspec); } /* compute transmission */ nd.prdir = data.ray_direction; nd.trans = data.mat.params.n.trans * (1.0f - nd.rspec); nd.tspec = nd.trans * data.mat.params.n.tspec; if (nd.tspec > FTINY) { nd.specfl |= SP_TRAN; /* check threshold */ if (!(nd.specfl & SP_PURE) && specthresh >= nd.tspec - FTINY) nd.specfl |= SP_TBLT; if (hastexture) { //TODO only if ambient depth == 0 if (dot(nd.prdir - pert, nd.normal) < -FTINY) nd.prdir = normalize(nd.prdir - pert); /* OK */ } } } /* transmitted ray */ if ((nd.specfl&(SP_TRAN | SP_PURE | SP_TBLT)) == (SP_TRAN | SP_PURE)) { #ifdef CONTRIB prd_shadow.rcoef *= nd.mcolor * nd.tspec; #endif Ray trans_ray = make_Ray(nd.hit, nd.prdir, SHADOW_RAY, ray_start(nd.hit, nd.prdir, nd.normal, RAY_START), RAY_END); rtTrace(top_object, trans_ray, prd_shadow); prd_shadow.result *= nd.mcolor * nd.tspec; #ifdef DAYSIM_COMPATIBLE daysimScale(prd_shadow.dc, nd.mcolor.x * nd.tspec); #endif } #endif /* TRANSMISSION */ return prd_shadow; } RT_CALLABLE_PROGRAM PerRayData_radiance closest_hit_normal_radiance(IntersectData const&data, PerRayData_radiance prd) { NORMDAT nd; /* check for back side */ nd.pnorm = faceforward(data.world_shading_normal, -data.ray_direction, data.world_geometric_normal); nd.normal = faceforward(data.world_geometric_normal, -data.ray_direction, data.world_geometric_normal); nd.hit = data.hit; PerRayData_radiance new_prd; float3 result = prd.mirror = make_float3(0.0f); nd.mcolor = data.mat.color; nd.scolor = make_float3(0.0f); nd.rspec = data.mat.params.n.spec; nd.alpha2 = data.mat.params.n.rough * data.mat.params.n.rough; nd.specfl = 0u; /* specularity flags */ /* get roughness */ if (nd.alpha2 <= FTINY) { nd.specfl |= SP_PURE; // label this as a purely specular reflection } /* perturb normal */ float3 pert = nd.normal - nd.pnorm; int hastexture = dot(pert, pert) > FTINY * FTINY; nd.pdot = -dot(data.ray_direction, nd.pnorm); if (nd.pdot < 0.0f) { /* fix orientation from raynormal in raytrace.c */ nd.pnorm += 2.0f * nd.pdot * data.ray_direction; nd.pdot = -nd.pdot; } if (nd.pdot < 0.001f) nd.pdot = 0.001f; /* non-zero for dirnorm() */ // if it's a face or a ring label as flat (currently we only support triangles, so everything is flat) nd.specfl |= SP_FLAT; /* modify material color */ //nd.mcolor *= rtTex3D(rtTextureId id, texcoord.x, texcoord.y, texcoord.z).xyz; /* compute Fresnel approx. 
*/ float fest = 0.0f; if (nd.specfl & SP_PURE && nd.rspec >= FRESTHRESH) { fest = FRESNE(nd.pdot); nd.rspec += fest * (1.0f - nd.rspec); } /* compute transmission */ nd.tdiff = nd.tspec = nd.trans = 0.0f; // because it's opaque #ifdef TRANSMISSION nd.prdir = data.ray_direction; if (data.mat.params.n.trans > 0.0f) { // data.mat.type == MAT_TRANS nd.trans = data.mat.params.n.trans * (1.0f - nd.rspec); nd.tspec = nd.trans * data.mat.params.n.tspec; nd.tdiff = nd.trans - nd.tspec; if (nd.tspec > FTINY) { nd.specfl |= SP_TRAN; /* check threshold */ if (!(nd.specfl & SP_PURE) && specthresh >= nd.tspec - FTINY) nd.specfl |= SP_TBLT; if (!prd.ambient_depth && hastexture) { if (dot(nd.prdir - pert, nd.normal) < -FTINY) nd.prdir = normalize(nd.prdir - pert); /* OK */ } } } /* diffuse reflection */ nd.rdiff = 1.0f - nd.trans - nd.rspec; /* transmitted ray */ if ((nd.specfl&(SP_TRAN | SP_PURE | SP_TBLT)) == (SP_TRAN | SP_PURE) && rayorigin(new_prd, prd, nd.mcolor * nd.tspec, 0, 0)) { #ifdef DAYSIM_COMPATIBLE new_prd.dc = daysimNext(prd.dc); #endif setupPayload(new_prd); Ray trans_ray = make_Ray(nd.hit, nd.prdir, RADIANCE_RAY, ray_start(nd.hit, nd.prdir, nd.normal, RAY_START), new_prd.tmax); rtTrace(top_object, trans_ray, new_prd); new_prd.result *= nd.mcolor * nd.tspec; result += new_prd.result; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, new_prd.dc, nd.mcolor.x * nd.tspec); #endif if (nd.tspec >= 1.0f - FTINY) { /* completely transparent */ prd.mirror = new_prd.mirror * nd.mcolor * nd.tspec; prd.mirror_distance = data.t + new_prd.mirror_distance; prd.distance = data.t + new_prd.distance; } else if (nd.tspec > nd.tdiff + nd.rdiff) prd.distance = data.t + rayDistance(new_prd); resolvePayload(prd, new_prd); } #endif // return if it's a shadow ray, which it isn't /* get specular reflection */ if (nd.rspec > FTINY) { nd.specfl |= SP_REFL; /* compute specular color */ if (data.mat.type != MAT_METAL) { nd.scolor = make_float3(nd.rspec); } else { if (fest > FTINY) { float d = data.mat.params.n.spec * (1.0f - fest); nd.scolor = fest + nd.mcolor * d; } else { nd.scolor = nd.mcolor * nd.rspec; } } /* check threshold */ if (!(nd.specfl & SP_PURE) && specthresh >= nd.rspec - FTINY) { nd.specfl |= SP_RBLT; } } /* reflected ray */ if ((nd.specfl&(SP_REFL | SP_PURE | SP_RBLT)) == (SP_REFL | SP_PURE) && rayorigin(new_prd, prd, nd.scolor, 1, 0)) { #ifdef DAYSIM_COMPATIBLE new_prd.dc = daysimNext(prd.dc); #endif setupPayload(new_prd); float3 vrefl = reflect(data.ray_direction, nd.pnorm); Ray refl_ray = make_Ray(nd.hit, vrefl, RADIANCE_RAY, ray_start(nd.hit, vrefl, nd.normal, RAY_START), new_prd.tmax); rtTrace(top_object, refl_ray, new_prd); new_prd.result *= nd.scolor; prd.mirror = new_prd.result; result += new_prd.result; prd.mirror_distance = data.t; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, new_prd.dc, nd.scolor.x); #endif if (nd.specfl & SP_FLAT && (prd.ambient_depth || !hastexture)) prd.mirror_distance += rayDistance(new_prd); resolvePayload(prd, new_prd); } if (!(nd.specfl & SP_PURE && nd.rdiff <= FTINY && nd.tdiff <= FTINY)) { /* not 100% pure specular */ /* checks *BLT flags */ if (!(nd.specfl & SP_PURE)) result += gaussamp(&nd, data.ray_direction, prd); #ifdef AMBIENT /* ambient from this side */ if (nd.rdiff > FTINY) { float3 aval = nd.mcolor * nd.rdiff; /* modified by material color */ if (nd.specfl & SP_RBLT) /* add in specular as well? 
*/ aval += nd.scolor; result += multambient(aval, nd.normal, nd.pnorm, nd.hit, data.mat.params.n.ambincl, prd); /* add to returned color */ } #ifdef TRANSMISSION /* ambient from other side */ if (nd.tdiff > FTINY) { float3 aval = nd.mcolor; /* modified by material color */ if (nd.specfl & SP_TBLT) aval *= nd.trans; else aval *= nd.tdiff; result += multambient(aval, -nd.normal, -nd.pnorm, nd.hit, data.mat.params.n.ambincl, prd); /* add to returned color */ } #endif /* TRANSMISSION */ #endif /* AMBIENT */ /* add direct component */ // This is the call to direct() in source.c // Let's start at line 447, and not bother with sorting for now // compute direct lighting PerRayData_shadow shadow_prd; #ifdef DAYSIM_COMPATIBLE shadow_prd.dc = daysimNext(prd.dc); #endif Ray shadow_ray = make_Ray(nd.hit, nd.pnorm, SHADOW_RAY, RAY_START, RAY_END); /* contributions from distant lights (mainly the sun) */ unsigned int num_lights = lights.size(); for (unsigned int i = 0u; i < num_lights; i++) { const DistantLight light = lights[i]; if ( light.casts_shadow ) { shadow_prd.target = i; shadow_ray.direction = normalize( light.pos ); //TODO implement direct jitter for distant light sources shadow_ray.tmin = ray_start(nd.hit, shadow_ray.direction, nd.normal, RAY_START); shadow_ray.tmax = RAY_END; result += dirnorm(&shadow_ray, &shadow_prd, &nd, light.solid_angle, data.ray_direction, prd); } } #ifdef LIGHTS /* contributions from nearby lights */ num_lights = lindex_buffer.size(); for (unsigned int i = 0u; i < num_lights; i++) { const uint3 v_idx = lindex_buffer[i]; const float3 r0 = vertex_buffer[v_idx.x] - nd.hit; const float3 r1 = vertex_buffer[v_idx.y] - nd.hit; const float3 r2 = vertex_buffer[v_idx.z] - nd.hit; float3 rdir = ( r0 + r1 + r2 ) / 3.0f; const unsigned int divs = flatpart( rdir, r0, r1, r2, prd.weight ); //TODO divisions should be smaller closer to the light source const float step = 1.0f / divs; for ( int j = 0; j < divs; j++ ) for ( int k = 0; k < divs; k++ ) { float2 lambda = make_float2( step * j, step * k ); const float3 p0 = barycentric( lambda, r0, r1, r2, k + j >= divs ); lambda = make_float2( step * ( j + 1 ), step * k ); const float3 p1 = barycentric( lambda, r0, r1, r2, k + j >= divs ); lambda = make_float2( step * j, step * ( k + 1 ) ); const float3 p2 = barycentric( lambda, r0, r1, r2, k + j >= divs ); const float omega = solid_angle( p0, p1, p2 ); if ( omega > FTINY ) { /* from nextssamp in srcsamp.c */ rdir = ( p0 + p1 + p2 ) / 3.0f; if ( dstrsrc > FTINY ) { /* jitter sample using random barycentric coordinates */ lambda = make_float2( curand_uniform( prd.state ), curand_uniform( prd.state ) ); float3 vpos = barycentric( lambda, p0, p1, p2, lambda.x + lambda.y >= 1.0f ); rdir += dstrsrc * ( vpos - rdir ); } shadow_prd.target = -v_idx.x - 1; //TODO find a better way to identify surface shadow_ray.direction = normalize( rdir ); shadow_ray.tmin = ray_start(nd.hit, shadow_ray.direction, nd.normal, RAY_START); shadow_ray.tmax = length(rdir) * 1.0001f; result += dirnorm(&shadow_ray, &shadow_prd, &nd, omega, data.ray_direction, prd); } } } #endif /* LIGHTS */ } // pass the color back up the tree prd.result = result; return prd; } /* compute source contribution */ RT_METHOD float3 dirnorm(Ray *shadow_ray, PerRayData_shadow *shadow_prd, const NORMDAT *nd, const float& omega, const float3& ray_dir, PerRayData_radiance &prd) { float3 cval = make_float3( 0.0f ); float ldot = dot(nd->pnorm, shadow_ray->direction); #ifdef TRANSMISSION if (ldot < 0.0f ? 
nd->trans <= FTINY : nd->trans >= 1.0f - FTINY) #else if ( ldot <= FTINY ) #endif return cval; /* Fresnel estimate */ float lrdiff = nd->rdiff; float ltdiff = nd->tdiff; if (nd->specfl & SP_PURE && nd->rspec >= FRESTHRESH && (lrdiff > FTINY) | (ltdiff > FTINY)) { float dtmp = 1.0f - FRESNE(fabs(ldot)); lrdiff *= dtmp; ltdiff *= dtmp; } if (ldot > FTINY && lrdiff > FTINY) { /* * Compute and add diffuse reflected component to returned * color. The diffuse reflected component will always be * modified by the color of the material. */ float dtmp = ldot * omega * lrdiff * M_1_PIf; cval += nd->mcolor * dtmp; } #ifdef TRANSMISSION if (ldot < -FTINY && ltdiff > FTINY) { /* * Compute diffuse transmission. */ float dtmp = -ldot * omega * ltdiff * M_1_PIf; cval += nd->mcolor * dtmp; } #endif if (ldot > FTINY && (nd->specfl&(SP_REFL | SP_PURE)) == SP_REFL) { /* * Compute specular reflection coefficient using * Gaussian distribution model. */ /* roughness */ float dtmp = nd->alpha2; /* + source if flat */ if (nd->specfl & SP_FLAT) dtmp += omega * 0.25f * M_1_PIf; /* half vector */ float3 vtmp = shadow_ray->direction - ray_dir; float d2 = dot(vtmp, nd->pnorm); d2 *= d2; float d3 = dot( vtmp, vtmp ); float d4 = (d3 - d2) / d2; /* new W-G-M-D model */ dtmp = expf(-d4/dtmp) * d3 / (M_PIf * d2*d2 * dtmp); /* worth using? */ if (dtmp > FTINY) { dtmp *= ldot * omega; cval += nd->scolor * dtmp; } } #ifdef TRANSMISSION if (ldot < -FTINY && (nd->specfl&(SP_TRAN | SP_PURE)) == SP_TRAN) { /* * Compute specular transmission. Specular transmission * is always modified by material color. */ /* roughness + source */ float dtmp = nd->alpha2 + omega * M_1_PIf; /* Gaussian */ dtmp = expf((2.0f * dot(nd->prdir, shadow_ray->direction) - 2.0f) / dtmp) / (M_PIf * dtmp); // may need to perturb direction /* worth using? */ if (dtmp > FTINY) { dtmp *= nd->tspec * omega * sqrtf(-ldot / nd->pdot); cval += nd->mcolor * dtmp; } } #endif /* from direct() in source.c */ if (fmaxf(cval) <= 0.0f) return cval; // cast shadow ray shadow_prd->result = make_float3(0.0f); #ifdef CONTRIB shadow_prd->rcoef = prd.rcoef * cval; #endif #ifdef ANTIMATTER shadow_prd->mask = prd.mask; shadow_prd->inside = prd.inside; #endif #ifdef DAYSIM_COMPATIBLE daysimSet(shadow_prd->dc, 0.0f); #endif rtTrace(top_object, *shadow_ray, *shadow_prd); #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, shadow_prd->dc, cval.x); #endif return cval * shadow_prd->result; } // sample Gaussian specular RT_METHOD float3 gaussamp(const NORMDAT *nd, const float3& ray_dir, PerRayData_radiance &prd) { float3 rcol = make_float3( 0.0f ); /* This section is based on the gaussamp method in normal.c */ if ((nd->specfl & (SP_REFL | SP_RBLT)) != SP_REFL && (nd->specfl & (SP_TRAN | SP_TBLT)) != SP_TRAN) return rcol; PerRayData_radiance gaus_prd; Ray gaus_ray = make_Ray(nd->hit, nd->pnorm, RADIANCE_RAY, RAY_START, RAY_END); float d; /* set up sample coordinates */ float3 u = getperpendicular(nd->pnorm); // prd.state? float3 v = cross(nd->pnorm, u); unsigned int nstarget, nstaken, ntrials; /* compute reflection */ if ((nd->specfl & (SP_REFL | SP_RBLT)) == SP_REFL && rayorigin(gaus_prd, prd, nd->scolor, 1, 1)) { //TODO the ambient depth increment is a hack to prevent the sun from affecting specular values float3 scolor = nd->scolor; nstarget = 1; if (specjitter > 1.5f) { /* multiple samples? 
*/ // By default it's 1.0 nstarget = specjitter * prd.weight + 0.5f; if ( gaus_prd.weight <= minweight * nstarget ) nstarget = gaus_prd.weight / minweight; if ( nstarget > 1 ) { d = 1.0f / nstarget; scolor *= d; //scolor, stored as ray rcoef #ifdef CONTRIB gaus_prd.rcoef *= d; #endif gaus_prd.weight *= d; // TODO make sure weight isn't changed by hit programs } else nstarget = 1; } float3 scol = make_float3( 0.0f ); #ifdef DAYSIM_COMPATIBLE DaysimCoef dc = daysimNext(prd.dc); if (nstarget > 1) { daysimSet(dc, 0.0f); gaus_prd.dc = daysimNext(dc); } else gaus_prd.dc = dc; #endif //dimlist[ndims++] = (int)(size_t)np->mp; unsigned int maxiter = MAXITER * nstarget; for (nstaken = ntrials = 0; nstaken < nstarget && ntrials < maxiter; ntrials++) { float2 rv = make_float2( curand_uniform( prd.state ), curand_uniform( prd.state ) ); // should be evenly distributed in both dimensions d = 2.0f * M_PIf * rv.x; float cosp = cosf( d ); float sinp = sinf( d ); if ( ( 0.0f <= specjitter ) && ( specjitter < 1.0f ) ) rv.y = 1.0f - specjitter * rv.y; if ( rv.y <= FTINY ) d = 1.0f; else d = sqrtf(nd->alpha2 * -logf(rv.y)); float3 h = nd->pnorm + d * (cosp * u + sinp * v); d = -2.0f * dot( h, ray_dir ) / ( 1.0f + d*d ); gaus_ray.direction = ray_dir + h * d; /* sample rejection test */ if ((d = dot(gaus_ray.direction, nd->normal)) <= FTINY) continue; gaus_ray.direction = normalize( gaus_ray.direction ); gaus_ray.tmin = ray_start(nd->hit, gaus_ray.direction, nd->normal, RAY_START); gaus_ray.tmax = gaus_prd.tmax; setupPayload(gaus_prd); //if (nstaken) // check for prd data that needs to be cleared rtTrace(top_object, gaus_ray, gaus_prd); resolvePayload(prd, gaus_prd); /* W-G-M-D adjustment */ if (nstarget > 1) { d = 2.0f / (1.0f - dot(ray_dir, nd->normal) / d); scol += gaus_prd.result * d; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(dc, gaus_prd.dc, d); #endif } else { rcol += gaus_prd.result * scolor; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, gaus_prd.dc, scolor.x); #endif } ++nstaken; } /* final W-G-M-D weighting */ if (nstarget > 1) { scol *= scolor; d = (float)nstarget / ntrials; rcol += scol * d; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, dc, scolor.x * d); #endif } //ndims--; } #ifdef TRANSMISSION /* compute transmission */ float3 mcolor = nd->mcolor * nd->tspec; /* modified by color */ if ((nd->specfl & (SP_TRAN | SP_TBLT)) == SP_TRAN && rayorigin(gaus_prd, prd, mcolor, 0, 0)) { nstarget = 1; if (specjitter > 1.5f) { /* multiple samples? 
*/ // By default it's 1.0 nstarget = specjitter * prd.weight + 0.5f; if ( gaus_prd.weight <= minweight * nstarget ) nstarget = gaus_prd.weight / minweight; if ( nstarget > 1 ) { d = 1.0f / nstarget; mcolor *= d; //mcolor, stored as ray rcoef #ifdef CONTRIB gaus_prd.rcoef *= d; #endif gaus_prd.weight *= d; // TODO make sure weight isn't changed by hit programs } else nstarget = 1; } //dimlist[ndims++] = (int)(size_t)np->mp; unsigned int maxiter = MAXITER * nstarget; for (nstaken = ntrials = 0; nstaken < nstarget && ntrials < maxiter; ntrials++) { float2 rv = make_float2( curand_uniform( prd.state ), curand_uniform( prd.state ) ); // should be evenly distributed in both dimensions d = 2.0f * M_PIf * rv.x; float cosp = cosf( d ); float sinp = sinf( d ); if ( ( 0.0f <= specjitter ) && ( specjitter < 1.0f ) ) rv.y = 1.0f - specjitter * rv.y; if ( rv.y <= FTINY ) d = 1.0f; else d = sqrtf(nd->alpha2 * -logf(rv.y)); gaus_ray.direction = nd->prdir + d * (cosp * u + sinp * v); // ray direction is perturbed /* sample rejection test */ if (dot(gaus_ray.direction, nd->normal) >= -FTINY) continue; gaus_ray.direction = normalize( gaus_ray.direction ); gaus_ray.tmin = ray_start(nd->hit, gaus_ray.direction, nd->normal, RAY_START); gaus_ray.tmax = gaus_prd.tmax; #ifdef DAYSIM_COMPATIBLE gaus_prd.dc = daysimNext(prd.dc); #endif setupPayload(gaus_prd); //if (nstaken) // check for prd data that needs to be cleared rtTrace(top_object, gaus_ray, gaus_prd); resolvePayload(prd, gaus_prd); rcol += gaus_prd.result * mcolor; ++nstaken; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, gaus_prd.dc, mcolor.x); #endif } //ndims--; } #endif //return make_float3(0.0f); return rcol; } #ifdef AMBIENT // Compute the ambient component and multiply by the coefficient. RT_METHOD float3 multambient(float3 aval, const float3& normal, const float3& pnormal, const float3& hit, const unsigned int& ambincl, PerRayData_radiance &prd) { if (exposure && !prd.ambient_depth) // TODO exposure is hack to check if we are running rvu return make_float3(0.0f); int do_ambient = 1; float d; if (ambdiv <= 0) /* no ambient calculation */ goto dumbamb; /* check number of bounces */ if (prd.ambient_depth >= ambounce) goto dumbamb; /* check ambient list */ if (!ambincl) goto dumbamb; if (ambacc > FTINY && navsum != 0) { /* ambient storage */ //if (tracktime) /* sort to minimize thrashing */ // sortambvals(0); /* interpolate ambient value */ //acol = make_float3( 0.0f ); //d = sumambient(acol, r, normal, rdepth, &atrunk, thescene.cuorg, thescene.cusize); PerRayData_ambient ambient_prd; ambient_prd.result = make_float3( 0.0f ); ambient_prd.surface_point = hit; ambient_prd.surface_normal = pnormal; ambient_prd.ambient_depth = prd.ambient_depth; ambient_prd.wsum = 0.0f; ambient_prd.weight = prd.weight; #ifdef DAYSIM_COMPATIBLE ambient_prd.dc = daysimNext(prd.dc); daysimSet(ambient_prd.dc, 0.0f); #endif #ifdef HIT_COUNT ambient_prd.hit_count = 0; #endif const float tmax = ray_start(hit, AMBIENT_RAY_LENGTH); Ray ambient_ray = make_Ray(hit - normal * tmax, normal, AMBIENT_RAY, 0.0f, 2.0f * tmax); rtTrace(top_ambient, ambient_ray, ambient_prd, RT_VISIBILITY_ALL, RT_RAY_FLAG_DISABLE_CLOSESTHIT); #ifdef HIT_COUNT prd.hit_count += ambient_prd.hit_count; #endif if (ambient_prd.wsum > FTINY) { // TODO if miss program is called, set wsum = 1.0f or place this before ambacc == 0.0f ambient_prd.result *= 1.0f / ambient_prd.wsum; #ifdef DAYSIM_COMPATIBLE daysimAddScaled(prd.dc, ambient_prd.dc, aval.x / ambient_prd.wsum); #endif return aval * ambient_prd.result; } 
//rdepth++; /* need to cache new value */ //d = makeambient(acol, r, normal, rdepth-1); //TODO implement as miss program for ambient ray //rdepth--; //if ( dot( ambient_prd.result, ambient_prd.result) > FTINY) { // quick check to see if a value was returned by miss program // return aval * ambient_prd.result; /* got new value */ //} do_ambient = !prd.ambient_depth && ambdiv_final; } if (do_ambient) { /* no ambient storage */ /* Option to show error if nothing found */ if (ambdiv_final < 0) rtThrow(RT_EXCEPTION_CUSTOM - ambdiv_final); float3 acol = aval; #ifdef DAYSIM_COMPATIBLE DaysimCoef dc = daysimNext(prd.dc); daysimSet(dc, 0.0f); d = doambient(&acol, normal, pnormal, hit, prd, dc); if (d > FTINY) daysimAdd(prd.dc, dc); #else d = doambient(&acol, normal, pnormal, hit, prd); #endif if (d > FTINY) return acol; } dumbamb: /* return global value */ if ((ambvwt <= 0) || (navsum == 0)) { #ifdef DAYSIM_COMPATIBLE daysimAdd(prd.dc, aval.x * ambval.x); #endif return aval * ambval; } float l = bright(ambval); /* average in computations */ if (l > FTINY) { d = (logf(l)*(float)ambvwt + avsum) / (float)(ambvwt + navsum); d = expf(d) / l; aval *= ambval; /* apply color of ambval */ #ifdef DAYSIM_COMPATIBLE daysimAdd(prd.dc, aval.x * ambval.x * d); #endif } else { d = expf( avsum / (float)navsum ); #ifdef DAYSIM_COMPATIBLE daysimAdd(prd.dc, aval.x * d); #endif } return aval * d; } /* sample indirect hemisphere, based on samp_hemi in ambcomp.c */ #ifdef DAYSIM_COMPATIBLE RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd, DaysimCoef dc) #else RT_METHOD int doambient(float3 *rcol, const float3& normal, const float3& pnormal, const float3& hit, PerRayData_radiance &prd) #endif { float d; float wt = prd.weight; /* set number of divisions */ if (wt > (d = 0.8f * fmaxf(*rcol) * wt / (ambdiv_final * minweight))) // Ignore ambacc <= FTINY check because this is faking ambacc == 0 calc wt = d; /* avoid ray termination */ int n = sqrtf(ambdiv_final * wt) + 0.5f; int i = 1 + 5 * (ambacc > FTINY); /* minimum number of samples */ if (n < i) n = i; const int nn = n * n; float3 acol = make_float3( 0.0f ); unsigned int sampOK = 0u; /* assign coefficient */ float3 acoef = *rcol / nn; /* Setup from ambsample in ambcomp.c */ PerRayData_radiance new_prd; #ifdef DAYSIM_COMPATIBLE new_prd.dc = daysimNext(dc); #endif Ray amb_ray = make_Ray( hit, pnormal, RADIANCE_RAY, RAY_START, RAY_END ); // Use normal point as temporary direction /* End ambsample setup */ /* make tangent plane axes */ float3 ux = getperpendicular( pnormal, prd.state ); float3 uy = cross( pnormal, ux ); /* sample divisions */ for (i = n; i--; ) for (int j = n; j--; ) { if (!rayorigin(new_prd, prd, acoef, 1, 1)) continue; //hp.sampOK += ambsample( &hp, i, j, normal, hit ); /* ambsample in ambcomp.c */ float2 spt = make_float2(curand_uniform(prd.state), curand_uniform(prd.state)); if (i > 0 && i < n - 1 && j > 0 && j < n - 1) /* avoid coincident samples */ spt = 0.1f + 0.8f * spt; SDsquare2disk( spt, (j+spt.y) / n, (i+spt.x) / n ); float zd = sqrtf( 1.0f - dot( spt, spt ) ); amb_ray.direction = normalize( spt.x*ux + spt.y*uy + zd*pnormal ); if (dot(amb_ray.direction, normal) <= 0) /* Prevent light leaks */ continue; amb_ray.tmin = ray_start( hit, amb_ray.direction, normal, RAY_START ); amb_ray.tmax = new_prd.tmax; //dimlist[ndims++] = AI(hp,i,j) + 90171; setupPayload(new_prd); //Ray amb_ray = make_Ray( hit, rdir, RADIANCE_RAY, RAY_START, new_prd.tmax ); rtTrace(top_object, amb_ray, 
new_prd); resolvePayload(prd, new_prd); //ndims--; if ( isnan( new_prd.result ) ) // TODO How does this happen? continue; if ( new_prd.distance <= FTINY ) continue; /* should never happen */ acol += new_prd.result * acoef; /* add to our sum */ #ifdef DAYSIM_COMPATIBLE daysimAddScaled(dc, new_prd.dc, acoef.x); #endif sampOK++; } *rcol = acol; if ( !sampOK ) { /* utter failure? */ return( 0 ); } if ( sampOK < nn ) { //hp.sampOK *= -1; /* soft failure */ return( 1 ); } //n = ambssamp * wt + 0.5f; //if (n > 8) { /* perform super-sampling? */ // ambsupersamp(hp, n); // *rcol = hp.acol; //} return( 1 ); /* all is well */ } #endif /* AMBIENT */ #ifdef LIGHTS /* partition a flat source */ RT_METHOD unsigned int flatpart( const float3& v, const float3& r0, const float3& r1, const float3& r2, const float& weight ) { //float3 vp = source[si->sn].snorm; //if ( dot( v, vp ) <= 0.0f ) /* behind source */ // return 0u; if ( srcsizerat <= FTINY ) return 1u; float d; /* Find longest edge */ float3 vp = r1 - r0; float d2 = dot( vp, vp ); vp = r2 - r1; if ( ( d = dot( vp, vp ) ) > d2 ) d2 = d; vp = r2 - r0; if ( ( d = dot( vp, vp ) ) > d2 ) d2 = d; /* Find minimum partition size */ d = srcsizerat / weight; d *= d * dot( v, v ); /* Find number of partions */ d2 /= d; if ( d2 < 1.0f ) return 1u; if ( d2 > ( d = MAXSPART >> 1 ) ) // Divide maximum partitions by two going from rectangle to triangle d2 = d; return (unsigned int)sqrtf( d2 ); } /* Solid angle calculation from "The solid angle of a plane triangle", A van Oosterom and J Strackee */ RT_METHOD float solid_angle( const float3& r0, const float3& r1, const float3& r2 ) { const float l0 = length( r0 ); const float l1 = length( r1 ); const float l2 = length( r2 ); const float numerator = dot( r0, cross( r1, r2 ) ); const float denominator = l0 * l1 * l2 + dot( r0, r1 ) * l2 + dot( r0, r2 ) * l1 + dot( r1, r2 ) * l0; return 2.0f * fabsf( atan2( numerator, denominator ) ); } /* Compute point from barycentric coordinates and flip if outside triangle */ RT_METHOD float3 barycentric( float2& lambda, const float3& r0, const float3& r1, const float3& r2, const int flip ) { if ( flip ) lambda = 1.0f - lambda; return r0 * ( 1.0f - lambda.x - lambda.y ) + r1 * lambda.x + r2 * lambda.y; } #endif /* LIGHTS */ /* convert 1-dimensional sample to 2 dimensions, based on multisamp.c */ //RT_METHOD float2 multisamp2(float r) /* 1-dimensional sample [0,1) */ //{ // int j; // register int k; // int2 ti; // float s; // // ti = make_int2( 0 ); // j = 8; // while (j--) { // k = s = r*(1<<2); // r = s - k; // ti += ti + make_int2( ((k>>2) & 1), ((k>>1) & 1) ); // } // ti += make_int2( frandom() ); // ti *= 1.0f/256.0f; //} /* hash a set of integer values */ //RT_METHOD int ilhash(int3 d) //{ // register int hval; // // hval = 0; // hval ^= d.x * 73771; // hval ^= d.y * 96289; // hval ^= d.z * 103699; // return(hval & 0x7fffffff); //}
7f160ec049761dfc1e694d9a6c4d21f1dd42764e.hip
// !!! This is a file automatically generated by hipify!!!
// Demonstration of kernel execution configuration for a one-dimensional
// grid.
// Example for video 2.2.
#include <hip/hip_runtime_api.h>
#include <stdio.h>

// Error checking macro
#define cudaCheckError(code)                                              \
  {                                                                       \
    if ((code) != hipSuccess) {                                           \
      fprintf(stderr, "Cuda failure %s:%d: '%s' \n", __FILE__, __LINE__,  \
              hipGetErrorString(code));                                   \
    }                                                                     \
  }

__global__ void kernel_1d() {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  printf("block %d, thread %d, index %d\n", blockIdx.x, threadIdx.x, index);
}

int main() {
  hipLaunchKernelGGL(( kernel_1d), dim3(4), dim3(8), 0, 0, );
  cudaCheckError(hipDeviceSynchronize());
}
7f160ec049761dfc1e694d9a6c4d21f1dd42764e.cu
// Demonstration of kernel execution configuration for a one-dimensional
// grid.
// Example for video 2.2.
#include <cuda_runtime_api.h>
#include <stdio.h>

// Error checking macro
#define cudaCheckError(code)                                              \
  {                                                                       \
    if ((code) != cudaSuccess) {                                          \
      fprintf(stderr, "Cuda failure %s:%d: '%s' \n", __FILE__, __LINE__,  \
              cudaGetErrorString(code));                                  \
    }                                                                     \
  }

__global__ void kernel_1d() {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  printf("block %d, thread %d, index %d\n", blockIdx.x, threadIdx.x, index);
}

int main() {
  kernel_1d<<<4, 8>>>();
  cudaCheckError(cudaDeviceSynchronize());
}
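The pair above isolates the launch-syntax rewrite hipify performs: the triple-chevron launch becomes a hipLaunchKernelGGL call with explicit grid, block, shared-memory, and stream arguments. A minimal standalone sketch of the same mapping follows; it is not an excerpt from the row, and demo_kernel and the buffer size are illustrative names chosen here.

// Illustrative sketch only; assumes the standard ROCm hip/hip_runtime.h header.
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void demo_kernel(int *out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  out[i] = i;
}

int main() {
  int *out = NULL;
  hipMalloc((void **)&out, 32 * sizeof(int));
  // CUDA source:    demo_kernel<<<4, 8>>>(out);
  // hipify output:  hipLaunchKernelGGL(demo_kernel, dim3(4), dim3(8), 0, 0, out);
  // The two extra arguments (0, 0) are the dynamic shared-memory size and the
  // stream, which the CUDA chevrons leave implicit.
  hipLaunchKernelGGL(demo_kernel, dim3(4), dim3(8), 0, 0, out);
  hipDeviceSynchronize();
  int host_out[32];
  hipMemcpy(host_out, out, 32 * sizeof(int), hipMemcpyDeviceToHost);
  printf("last index written: %d\n", host_out[31]);
  hipFree(out);
  return 0;
}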
b6a06f62e75e0e64cf53bbb7aa7b7b2a6b5712cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "stdlib.h" #include "stdio.h" bool initCuda(); hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b,clock_t* time) { clock_t start = clock(); int i = threadIdx.x; c[i] = a[i] + b[i]; *time = clock() - start; } int main() { initCuda(); const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } void printDeviceProp(const hipDeviceProp_t &prop) { printf("---------------------------------------------------- \n"); printf("Device Name : %s \n", prop.name); printf("totalGlobalMem : %zu MB\n",prop.totalGlobalMem/(1<<20)); printf("sharedMemPerBlock : %zu B \n", prop.sharedMemPerBlock); printf("regsPerBlock : %d \n", prop.regsPerBlock); printf("warpSize : %d \n", prop.warpSize); printf("memPitch : %zu B \n", prop.memPitch); printf("maxThreadsPerBlock : %d \n", prop.maxThreadsPerBlock); printf("maxThreadsDim[0 - 2] : %d %d %d \n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("maxGridSize[0 - 2] : %d %d %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("totalConstMem : %zu B \n", prop.totalConstMem); printf("major.minor : %d.%d \n", prop.major, prop.minor); printf("clockRate : %d khz \n", prop.clockRate); printf("textureAlignment : %zu B \n", prop.textureAlignment); printf("deviceOverlap : %d \n", prop.deviceOverlap); printf("multiProcessorCount : %d \n", prop.multiProcessorCount); printf("---------------------------------------------------- \n"); } bool initCuda() { hipError_t cudaStatus; int count; hipGetDeviceCount(&count); if(count == 0) { printf("There is no device \n"); return false; } int i; for(i=0;i<count;++i) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop,i) == hipSuccess) { if(prop.major>=1) { // printf("cuda device id: %d\n",i); printDeviceProp(prop); break; } } } if(i==count) { printf("There is no device supporting cuda 1.x\n"); return false; } cudaStatus = hipSetDevice(0); return cudaStatus; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; clock_t* time; clock_t time_used; // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&time, sizeof(clock_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. // CUDA <<<block , thread , shared memory >>>(...); hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b, time); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(&time_used, time, sizeof(clock_t), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } printf("GPU time: %ld\n", time_used); Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
b6a06f62e75e0e64cf53bbb7aa7b7b2a6b5712cd.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "stdlib.h" #include "stdio.h" bool initCuda(); cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b,clock_t* time) { clock_t start = clock(); int i = threadIdx.x; c[i] = a[i] + b[i]; *time = clock() - start; } int main() { initCuda(); const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } void printDeviceProp(const cudaDeviceProp &prop) { printf("---------------------------------------------------- \n"); printf("Device Name : %s \n", prop.name); printf("totalGlobalMem : %zu MB\n",prop.totalGlobalMem/(1<<20)); printf("sharedMemPerBlock : %zu B \n", prop.sharedMemPerBlock); printf("regsPerBlock : %d \n", prop.regsPerBlock); printf("warpSize : %d \n", prop.warpSize); printf("memPitch : %zu B \n", prop.memPitch); printf("maxThreadsPerBlock : %d \n", prop.maxThreadsPerBlock); printf("maxThreadsDim[0 - 2] : %d %d %d \n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf("maxGridSize[0 - 2] : %d %d %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf("totalConstMem : %zu B \n", prop.totalConstMem); printf("major.minor : %d.%d \n", prop.major, prop.minor); printf("clockRate : %d khz \n", prop.clockRate); printf("textureAlignment : %zu B \n", prop.textureAlignment); printf("deviceOverlap : %d \n", prop.deviceOverlap); printf("multiProcessorCount : %d \n", prop.multiProcessorCount); printf("---------------------------------------------------- \n"); } bool initCuda() { cudaError_t cudaStatus; int count; cudaGetDeviceCount(&count); if(count == 0) { printf("There is no device \n"); return false; } int i; for(i=0;i<count;++i) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop,i) == cudaSuccess) { if(prop.major>=1) { //打印设备信息 printf("cuda device id: %d\n",i); printDeviceProp(prop); break; } } } if(i==count) { printf("There is no device supporting cuda 1.x\n"); return false; } cudaStatus = cudaSetDevice(0); return cudaStatus; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; clock_t* time; clock_t time_used; // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&time, sizeof(clock_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. // 在CUDA 中执行函数 语法:函数名称<<<block 数目, thread 数目, shared memory 大小>>>(参数...); addKernel<<<1, size>>>(dev_c, dev_a, dev_b, time); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(&time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } printf("GPU time: %ld\n", time_used); Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
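Beyond the launch rewrite, the pair above shows the device-query renames (cudaGetDeviceCount, cudaDeviceProp, cudaGetDeviceProperties map one-to-one onto hip* calls). A reduced, standalone sketch of that part of the mapping, not an excerpt from the row:

// Illustrative sketch only: the query loop mirrors the initCuda() pattern above.
#include <hip/hip_runtime.h>
#include <stdio.h>

int main() {
  int count = 0;
  hipGetDeviceCount(&count);             // cudaGetDeviceCount in the CUDA source
  if (count == 0) {
    printf("There is no device \n");
    return 1;
  }
  for (int i = 0; i < count; ++i) {
    hipDeviceProp_t prop;                // cudaDeviceProp becomes hipDeviceProp_t
    if (hipGetDeviceProperties(&prop, i) == hipSuccess)
      printf("device %d: %s, %zu MB global memory, compute %d.%d\n", i,
             prop.name, prop.totalGlobalMem / (1 << 20), prop.major, prop.minor);
  }
  return 0;
}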
96ef04dcbaf4da50c6eb789b2804860682cf6438.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_2_b; int xdim0_update_halo_kernel3_minus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_2_b; int ydim0_update_halo_kernel3_minus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_2_b; int xdim1_update_halo_kernel3_minus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_2_b; int ydim1_update_halo_kernel3_minus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_minus_2_b * (y) + \ xdim0_update_halo_kernel3_minus_2_b * ydim0_update_halo_kernel3_minus_2_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_minus_2_b * (y) + \ xdim1_update_halo_kernel3_minus_2_b * ydim1_update_halo_kernel3_minus_2_b * \ (z)) // user function __device__ inline void update_halo_kernel3_minus_2_b(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(-2, 0, 0)]); if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(-2, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_2_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_2_b + idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_2_b * ydim0_update_halo_kernel3_minus_2_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_2_b + idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_2_b * ydim1_update_halo_kernel3_minus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_2_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_minus_2_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 112)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(112, "update_halo_kernel3_minus_2_b"); OPS_kernels[112].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; 
int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_2_b_h || ydim0 != ydim0_update_halo_kernel3_minus_2_b_h || xdim1 != xdim1_update_halo_kernel3_minus_2_b_h || ydim1 != ydim1_update_halo_kernel3_minus_2_b_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel3_minus_2_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_minus_2_b_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel3_minus_2_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_minus_2_b_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel3_minus_2_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_minus_2_b_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel3_minus_2_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_minus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_2_b), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[112].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
96ef04dcbaf4da50c6eb789b2804860682cf6438.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_2_b; int xdim0_update_halo_kernel3_minus_2_b_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_2_b; int ydim0_update_halo_kernel3_minus_2_b_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_2_b; int xdim1_update_halo_kernel3_minus_2_b_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_2_b; int ydim1_update_halo_kernel3_minus_2_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_minus_2_b * (y) + \ xdim0_update_halo_kernel3_minus_2_b * ydim0_update_halo_kernel3_minus_2_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_minus_2_b * (y) + \ xdim1_update_halo_kernel3_minus_2_b * ydim1_update_halo_kernel3_minus_2_b * \ (z)) // user function __device__ inline void update_halo_kernel3_minus_2_b(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(-2, 0, 0)]); if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(-2, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_2_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_2_b + idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_2_b * ydim0_update_halo_kernel3_minus_2_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_2_b + idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_2_b * ydim1_update_halo_kernel3_minus_2_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_2_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_minus_2_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 112)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(112, "update_halo_kernel3_minus_2_b"); OPS_kernels[112].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_2_b_h || 
ydim0 != ydim0_update_halo_kernel3_minus_2_b_h || xdim1 != xdim1_update_halo_kernel3_minus_2_b_h || ydim1 != ydim1_update_halo_kernel3_minus_2_b_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel3_minus_2_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_minus_2_b_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel3_minus_2_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_minus_2_b_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel3_minus_2_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_minus_2_b_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel3_minus_2_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_minus_2_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel3_minus_2_b<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[112].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[112].mpi_time += t2 - t1; OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[112].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
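The distinctive translation in the pair above is the update of __constant__ extents: cudaMemcpyToSymbol becomes hipMemcpyToSymbol with the same argument order. A small standalone sketch mirroring that call form; the symbol and kernel names are invented here, and some HIP versions expect the symbol wrapped in HIP_SYMBOL(...) rather than passed bare as in the hipify output above.

#include <hip/hip_runtime.h>
#include <stdio.h>

__constant__ int xdim_c;  // illustrative stand-in for the per-kernel extents above

__global__ void read_xdim(int *out) { *out = xdim_c; }

int main() {
  int h_xdim = 42, result = 0, *d_out = NULL;
  // CUDA: cudaMemcpyToSymbol(xdim_c, &h_xdim, sizeof(int));
  // HIP : same shape; newer hipify emits HIP_SYMBOL(xdim_c) around the symbol.
  hipMemcpyToSymbol(xdim_c, &h_xdim, sizeof(int));
  hipMalloc((void **)&d_out, sizeof(int));
  hipLaunchKernelGGL(read_xdim, dim3(1), dim3(1), 0, 0, d_out);
  hipMemcpy(&result, d_out, sizeof(int), hipMemcpyDeviceToHost);
  printf("constant on device = %d\n", result);
  hipFree(d_out);
  return 0;
}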
a7ce785f937ef4ce8c97cf94c8b883ee1e998139.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define MAXBLOCKS 1
#define MAXTHREADS 1

__global__ void SimpleAddition(int *a, int *b, int *c)
{
    *c = *a + *b;
}

int main()
{
    int a, b, c;          //CPU
    int *d_a, *d_b, *d_c; //GPU

    //Allocate GPU memory
    hipMalloc((void **)&d_a, sizeof(int));
    hipMalloc((void **)&d_b, sizeof(int));
    hipMalloc((void **)&d_c, sizeof(int));

    a = 1;
    b = 2;
    c = 0;

    //Copy data to GPU
    hipMemcpy(d_a, &a, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, &b, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_c, &c, sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( SimpleAddition), dim3(MAXBLOCKS), dim3(MAXTHREADS), 0, 0, d_a, d_b, d_c);

    //Copy result back to CPU
    hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost);

    printf("%d + %d = %d\n", a, b, c);

    //Free GPU memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return 0;
}
a7ce785f937ef4ce8c97cf94c8b883ee1e998139.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define MAXBLOCKS 1
#define MAXTHREADS 1

__global__ void SimpleAddition(int *a, int *b, int *c)
{
    *c = *a + *b;
}

int main()
{
    int a, b, c;          //CPU
    int *d_a, *d_b, *d_c; //GPU

    //Allocate GPU memory
    cudaMalloc((void **)&d_a, sizeof(int));
    cudaMalloc((void **)&d_b, sizeof(int));
    cudaMalloc((void **)&d_c, sizeof(int));

    a = 1;
    b = 2;
    c = 0;

    //Copy data to GPU
    cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, &c, sizeof(int), cudaMemcpyHostToDevice);

    SimpleAddition<<<MAXBLOCKS, MAXTHREADS>>>(d_a, d_b, d_c);

    //Copy result back to CPU
    cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost);

    printf("%d + %d = %d\n", a, b, c);

    //Free GPU memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
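The pair above is mostly a rename of the memory-management API: cudaMalloc, cudaMemcpy, cudaFree and the transfer-direction enums map one-to-one onto their hip* counterparts. A minimal round-trip sketch of that mapping, with illustrative buffer names and values not taken from the row:

#include <hip/hip_runtime.h>
#include <stdio.h>

int main() {
  const int h_in = 7;
  int h_out = 0;
  int *d_buf = NULL;

  // cudaMalloc / cudaMemcpy / cudaFree become the hip* calls below, and
  // cudaMemcpyHostToDevice / cudaMemcpyDeviceToHost keep their meaning.
  hipError_t err = hipMalloc((void **)&d_buf, sizeof(int));
  if (err != hipSuccess) {
    fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
    return 1;
  }
  hipMemcpy(d_buf, &h_in, sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(&h_out, d_buf, sizeof(int), hipMemcpyDeviceToHost);
  printf("round trip: %d -> %d\n", h_in, h_out);
  hipFree(d_buf);
  return 0;
}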
3e2163e846a28c0cf227a9ecee25ad45c6607739.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"

namespace oneflow {

namespace {

__global__ void DynamicLossScaleScheduleGpu(const int64_t increment_period, const float multiplier,
                                            const int64_t* count_not_finite, float* loss_scale,
                                            int64_t* good_step_counter) {
  if (*count_not_finite == 0) {
    int64_t cur_good_step_counter = *good_step_counter + 1;
    if (cur_good_step_counter >= increment_period) {
      *loss_scale = static_cast<float>(
          min(static_cast<double>(*loss_scale) * multiplier, static_cast<double>(FLT_MAX)));
      cur_good_step_counter = 0;
    }
    *good_step_counter = cur_good_step_counter;
  } else {
    *good_step_counter = 0;
    *loss_scale = static_cast<float>(max(static_cast<double>(*loss_scale) / multiplier, 1.0));
  }
}

}  // namespace

class DynamicLossScaleScheduleGpuKernel final : public user_op::OpKernel {
 public:
  DynamicLossScaleScheduleGpuKernel() = default;
  ~DynamicLossScaleScheduleGpuKernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* count_not_finite = ctx->Tensor4ArgNameAndIndex("count_not_finite", 0);
    user_op::Tensor* loss_scale = ctx->Tensor4ArgNameAndIndex("loss_scale", 0);
    user_op::Tensor* good_step_counter = ctx->Tensor4ArgNameAndIndex("good_step_counter", 0);
    const auto increment_period = ctx->Attr<int64_t>("increment_period");
    const auto multiplier = ctx->Attr<float>("multiplier");
    hipLaunchKernelGGL(( DynamicLossScaleScheduleGpu), dim3(1), dim3(1), 0, ctx->device_ctx()->cuda_stream(),
        increment_period, multiplier, count_not_finite->dptr<int64_t>(),
        loss_scale->mut_dptr<float>(), good_step_counter->mut_dptr<int64_t>());
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; }
};

REGISTER_USER_KERNEL("dynamic_loss_scale_schedule")
    .SetCreateFn<DynamicLossScaleScheduleGpuKernel>()
    .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu"));

}  // namespace oneflow
3e2163e846a28c0cf227a9ecee25ad45c6607739.cu
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"

namespace oneflow {

namespace {

__global__ void DynamicLossScaleScheduleGpu(const int64_t increment_period, const float multiplier,
                                            const int64_t* count_not_finite, float* loss_scale,
                                            int64_t* good_step_counter) {
  if (*count_not_finite == 0) {
    int64_t cur_good_step_counter = *good_step_counter + 1;
    if (cur_good_step_counter >= increment_period) {
      *loss_scale = static_cast<float>(
          min(static_cast<double>(*loss_scale) * multiplier, static_cast<double>(FLT_MAX)));
      cur_good_step_counter = 0;
    }
    *good_step_counter = cur_good_step_counter;
  } else {
    *good_step_counter = 0;
    *loss_scale = static_cast<float>(max(static_cast<double>(*loss_scale) / multiplier, 1.0));
  }
}

}  // namespace

class DynamicLossScaleScheduleGpuKernel final : public user_op::OpKernel {
 public:
  DynamicLossScaleScheduleGpuKernel() = default;
  ~DynamicLossScaleScheduleGpuKernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* count_not_finite = ctx->Tensor4ArgNameAndIndex("count_not_finite", 0);
    user_op::Tensor* loss_scale = ctx->Tensor4ArgNameAndIndex("loss_scale", 0);
    user_op::Tensor* good_step_counter = ctx->Tensor4ArgNameAndIndex("good_step_counter", 0);
    const auto increment_period = ctx->Attr<int64_t>("increment_period");
    const auto multiplier = ctx->Attr<float>("multiplier");
    DynamicLossScaleScheduleGpu<<<1, 1, 0, ctx->device_ctx()->cuda_stream()>>>(
        increment_period, multiplier, count_not_finite->dptr<int64_t>(),
        loss_scale->mut_dptr<float>(), good_step_counter->mut_dptr<int64_t>());
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return true; }
};

REGISTER_USER_KERNEL("dynamic_loss_scale_schedule")
    .SetCreateFn<DynamicLossScaleScheduleGpuKernel>()
    .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu"));

}  // namespace oneflow
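In the pair above the only launch-site change is where the stream goes: the fourth chevron argument becomes the fifth hipLaunchKernelGGL argument. A reduced sketch of launching on an explicit stream; the stream, kernel, and values are illustrative, and the OneFlow device-context plumbing is omitted.

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale_kernel(float *x, float mul) { *x *= mul; }

int main() {
  float h_val = 2.0f, *d_val = NULL;
  hipStream_t stream;
  hipStreamCreate(&stream);
  hipMalloc((void **)&d_val, sizeof(float));
  hipMemcpy(d_val, &h_val, sizeof(float), hipMemcpyHostToDevice);
  // CUDA: scale_kernel<<<1, 1, 0, stream>>>(d_val, 4.0f);
  // HIP : the stream moves into the fifth hipLaunchKernelGGL argument.
  hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(1), 0, stream, d_val, 4.0f);
  hipStreamSynchronize(stream);
  hipMemcpy(&h_val, d_val, sizeof(float), hipMemcpyDeviceToHost);
  printf("result = %f\n", h_val);
  hipFree(d_val);
  hipStreamDestroy(stream);
  return 0;
}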
90b22125e1b36b0454a0732a02c3edb91cd00bcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zlascl.cu normal z -> d, Fri Jan 30 19:00:09 2015 @author Mark Gates */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl_full(int m, int n, double mul, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl_lower(int m, int n, double mul, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; A += ind; if (ind < m) { for(int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl_upper(int m, int n, double mul, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- DLASCL multiplies the M by N real matrix A by the real scalar CTO/CFROM. This is done without over/underflow as long as the final result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] kl INTEGER Unused, for LAPACK compatability. \param[in] ku KU is INTEGER Unused, for LAPACK compatability. \param[in] cfrom DOUBLE PRECISION \param[in] cto DOUBLE PRECISION \n The matrix A is multiplied by CTO/CFROM. A(I,J) is computed without over/underflow if the final result CTO*A(I,J)/CFROM can be represented without over/underflow. CFROM must be nonzero. CFROM and CTO must not be NAN. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be multiplied by CTO/CFROM. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl_q( magma_type_t type, magma_int_t kl, magma_int_t ku, double cfrom, double cto, magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( cfrom == 0 || isnan(cfrom) ) *info = -4; else if ( isnan(cto) ) *info = -5; else if ( m < 0 ) *info = -6; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -7; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul; magma_int_t done = false; // Uses over/underflow procedure from LAPACK dlascl // Get machine parameters smlnum = lapackf77_dlamch("s"); bignum = 1 / smlnum; cfromc = cfrom; ctoc = cto; int cnt = 0; while( ! done ) { cfrom1 = cfromc*smlnum; if( cfrom1 == cfromc ) { // cfromc is an inf. Multiply by a correctly signed zero for // finite ctoc, or a nan if ctoc is infinite. mul = ctoc / cfromc; done = true; cto1 = ctoc; } else { cto1 = ctoc / bignum; if( cto1 == ctoc ) { // ctoc is either 0 or an inf. In both cases, ctoc itself // serves as the correct multiplication factor. mul = ctoc; done = true; cfromc = 1; } else if( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) { mul = smlnum; done = false; cfromc = cfrom1; } else if( fabs(cto1) > fabs(cfromc) ) { mul = bignum; done = false; ctoc = cto1; } else { mul = ctoc / cfromc; done = true; } } if (type == MagmaLower) { hipLaunchKernelGGL(( dlascl_lower) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( dlascl_upper) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda); } else if (type == MagmaFull) { hipLaunchKernelGGL(( dlascl_full) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda); } cnt += 1; } } /** @see magmablas_dlascl_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl( magma_type_t type, magma_int_t kl, magma_int_t ku, double cfrom, double cto, magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_dlascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info ); }
90b22125e1b36b0454a0732a02c3edb91cd00bcc.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zlascl.cu normal z -> d, Fri Jan 30 19:00:09 2015 @author Mark Gates */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl_full(int m, int n, double mul, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl_lower(int m, int n, double mul, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; A += ind; if (ind < m) { for(int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl_upper(int m, int n, double mul, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; A += ind; if (ind < m) { for(int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- DLASCL multiplies the M by N real matrix A by the real scalar CTO/CFROM. This is done without over/underflow as long as the final result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] kl INTEGER Unused, for LAPACK compatability. \param[in] ku KU is INTEGER Unused, for LAPACK compatability. \param[in] cfrom DOUBLE PRECISION \param[in] cto DOUBLE PRECISION \n The matrix A is multiplied by CTO/CFROM. A(I,J) is computed without over/underflow if the final result CTO*A(I,J)/CFROM can be represented without over/underflow. CFROM must be nonzero. CFROM and CTO must not be NAN. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be multiplied by CTO/CFROM. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl_q( magma_type_t type, magma_int_t kl, magma_int_t ku, double cfrom, double cto, magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( cfrom == 0 || isnan(cfrom) ) *info = -4; else if ( isnan(cto) ) *info = -5; else if ( m < 0 ) *info = -6; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -7; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul; magma_int_t done = false; // Uses over/underflow procedure from LAPACK dlascl // Get machine parameters smlnum = lapackf77_dlamch("s"); bignum = 1 / smlnum; cfromc = cfrom; ctoc = cto; int cnt = 0; while( ! done ) { cfrom1 = cfromc*smlnum; if( cfrom1 == cfromc ) { // cfromc is an inf. Multiply by a correctly signed zero for // finite ctoc, or a nan if ctoc is infinite. mul = ctoc / cfromc; done = true; cto1 = ctoc; } else { cto1 = ctoc / bignum; if( cto1 == ctoc ) { // ctoc is either 0 or an inf. In both cases, ctoc itself // serves as the correct multiplication factor. mul = ctoc; done = true; cfromc = 1; } else if( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) { mul = smlnum; done = false; cfromc = cfrom1; } else if( fabs(cto1) > fabs(cfromc) ) { mul = bignum; done = false; ctoc = cto1; } else { mul = ctoc / cfromc; done = true; } } if (type == MagmaLower) { dlascl_lower <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda); } else if (type == MagmaUpper) { dlascl_upper <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda); } else if (type == MagmaFull) { dlascl_full <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda); } cnt += 1; } } /** @see magmablas_dlascl_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl( magma_type_t type, magma_int_t kl, magma_int_t ku, double cfrom, double cto, magma_int_t m, magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_dlascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info ); }
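// Standalone sketch of the over/underflow-safe scaling loop that both versions of
// magmablas_dlascl_q above implement (borrowed from LAPACK dlascl). This is an
// illustration only, not MAGMA code: DBL_MIN stands in for lapackf77_dlamch("s"),
// and printing the factor replaces the kernel launch that scales the matrix.
#include <cfloat>
#include <cmath>
#include <cstdio>

static void dlascl_factor_loop(double cfrom, double cto)
{
    const double smlnum = DBL_MIN;        // "safe minimum", assumed equivalent to dlamch("s")
    const double bignum = 1.0 / smlnum;
    double cfromc = cfrom, ctoc = cto;
    bool done = false;
    while (!done) {
        const double cfrom1 = cfromc * smlnum;
        const double cto1   = ctoc / bignum;
        double mul;
        if (cfrom1 == cfromc) {                 // cfromc is inf: finish in one step
            mul = ctoc / cfromc;  done = true;
        } else if (cto1 == ctoc) {              // ctoc is 0 or inf: ctoc itself is the factor
            mul = ctoc;           done = true;  cfromc = 1;
        } else if (fabs(cfrom1) > fabs(ctoc) && ctoc != 0) {
            mul = smlnum;         cfromc = cfrom1;   // shrink the source safely
        } else if (fabs(cto1) > fabs(cfromc)) {
            mul = bignum;         ctoc = cto1;       // grow toward the target safely
        } else {
            mul = ctoc / cfromc;  done = true;       // ratio is now representable
        }
        printf("apply factor %g\n", mul);  // here the real code launches dlascl_{full,lower,upper}
    }
}

int main() { dlascl_factor_loop(1e-300, 1e300); return 0; }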
2a785427abd33a6cb694c77fefbf1fdfbc4fc944.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ristretto/base_ristretto_layer.hpp" #include "ristretto/base_ristretto_layer.cuh" namespace caffe { template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeWeights_gpu( vector<shared_ptr<Blob<Dtype> > > weights_quantized, const int rounding, const bool bias_term) { Dtype* weight = weights_quantized[0]->mutable_gpu_data(); const int cnt_weight = weights_quantized[0]->count(); switch (precision_) { case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(weight, cnt_weight, bw_params_, rounding, fl_params_); if (bias_term) { Trim2FixedPoint_gpu(weights_quantized[1]->mutable_gpu_data(), weights_quantized[1]->count(), bw_params_ + bw_layer_out_, rounding, bw_params_ + fl_layer_out_); } break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerInputs_gpu( Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_in_, rounding_, fl_layer_in_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerOutputs_gpu(Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_out_, rounding_, fl_layer_out_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> __global__ void Trim2FixedPoint_kernel(Dtype* data, const int cnt, const int bit_width, const int rounding, const int fl) { CUDA_KERNEL_LOOP(index, cnt) { // round data data[index] *= powf(2, fl); switch (rounding) { case QuantizationParameter_Rounding_NEAREST: data[index] = rintf(data[index]); break; case QuantizationParameter_Rounding_STOCHASTIC: //data[index] += RandUniform_device(index); data[index] = __float2int_rd(data[index] + RandUniform_device(index)); // ??? somehow this is working ??? 
break; default: break; } // saturate data Dtype max_data = (powf(2, bit_width - 1) - 1.0); Dtype min_data = -powf(2, bit_width - 1); data[index] = fmaxf(fminf(data[index], max_data), min_data); // back to float data[index] *= powf(2, -fl); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2FixedPoint_gpu(Dtype* data, const int cnt, const int bit_width, const int rounding, const int fl) { hipLaunchKernelGGL(( Trim2FixedPoint_kernel), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, data, cnt, bit_width, rounding, fl); } // Explicit instantiations template void BaseRistrettoLayer<double>::QuantizeWeights_gpu( vector<shared_ptr<Blob<double> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<float>::QuantizeWeights_gpu( vector<shared_ptr<Blob<float> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<double>::QuantizeLayerInputs_gpu(double* data, const int count); template void BaseRistrettoLayer<float>::QuantizeLayerInputs_gpu(float* data, const int count); template void BaseRistrettoLayer<double>::QuantizeLayerOutputs_gpu( double* top_data, const int top_count); template void BaseRistrettoLayer<float>::QuantizeLayerOutputs_gpu( float* top_data, const int top_count); template void BaseRistrettoLayer<double>::Trim2FixedPoint_gpu(double* data, const int cnt, const int bit_width, const int rounding, const int fl); template void BaseRistrettoLayer<float>::Trim2FixedPoint_gpu(float* data, const int cnt, const int bit_width, const int rounding, const int fl); } // namespace caffe
2a785427abd33a6cb694c77fefbf1fdfbc4fc944.cu
#include "ristretto/base_ristretto_layer.hpp" #include "ristretto/base_ristretto_layer.cuh" namespace caffe { template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeWeights_gpu( vector<shared_ptr<Blob<Dtype> > > weights_quantized, const int rounding, const bool bias_term) { Dtype* weight = weights_quantized[0]->mutable_gpu_data(); const int cnt_weight = weights_quantized[0]->count(); switch (precision_) { case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(weight, cnt_weight, bw_params_, rounding, fl_params_); if (bias_term) { Trim2FixedPoint_gpu(weights_quantized[1]->mutable_gpu_data(), weights_quantized[1]->count(), bw_params_ + bw_layer_out_, rounding, bw_params_ + fl_layer_out_); } break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerInputs_gpu( Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_in_, rounding_, fl_layer_in_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerOutputs_gpu(Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_out_, rounding_, fl_layer_out_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> __global__ void Trim2FixedPoint_kernel(Dtype* data, const int cnt, const int bit_width, const int rounding, const int fl) { CUDA_KERNEL_LOOP(index, cnt) { // round data data[index] *= powf(2, fl); switch (rounding) { case QuantizationParameter_Rounding_NEAREST: data[index] = rintf(data[index]); break; case QuantizationParameter_Rounding_STOCHASTIC: //data[index] += RandUniform_device(index); data[index] = __float2int_rd(data[index] + RandUniform_device(index)); // ??? somehow this is working ??? 
break; default: break; } // saturate data Dtype max_data = (powf(2, bit_width - 1) - 1.0); Dtype min_data = -powf(2, bit_width - 1); data[index] = fmaxf(fminf(data[index], max_data), min_data); // back to float data[index] *= powf(2, -fl); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2FixedPoint_gpu(Dtype* data, const int cnt, const int bit_width, const int rounding, const int fl) { Trim2FixedPoint_kernel<<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS>>>( data, cnt, bit_width, rounding, fl); } // Explicit instantiations template void BaseRistrettoLayer<double>::QuantizeWeights_gpu( vector<shared_ptr<Blob<double> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<float>::QuantizeWeights_gpu( vector<shared_ptr<Blob<float> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<double>::QuantizeLayerInputs_gpu(double* data, const int count); template void BaseRistrettoLayer<float>::QuantizeLayerInputs_gpu(float* data, const int count); template void BaseRistrettoLayer<double>::QuantizeLayerOutputs_gpu( double* top_data, const int top_count); template void BaseRistrettoLayer<float>::QuantizeLayerOutputs_gpu( float* top_data, const int top_count); template void BaseRistrettoLayer<double>::Trim2FixedPoint_gpu(double* data, const int cnt, const int bit_width, const int rounding, const int fl); template void BaseRistrettoLayer<float>::Trim2FixedPoint_gpu(float* data, const int cnt, const int bit_width, const int rounding, const int fl); } // namespace caffe
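// Scalar reference (an illustration, not part of the Ristretto sources above) of what
// Trim2FixedPoint_kernel does to one value with NEAREST rounding: shift the binary point
// left by fl bits, round, saturate to a signed bit_width window, and shift back.
#include <algorithm>
#include <cmath>
#include <cstdio>

static float trim_to_fixed_point(float x, int bit_width, int fl)
{
    float q = x * std::pow(2.0f, fl);                        // data[index] *= powf(2, fl)
    q = std::rint(q);                                        // NEAREST rounding branch
    const float max_q =  std::pow(2.0f, bit_width - 1) - 1.0f;
    const float min_q = -std::pow(2.0f, bit_width - 1);
    q = std::max(std::min(q, max_q), min_q);                 // saturate
    return q * std::pow(2.0f, -fl);                          // back to floating point
}

int main()
{
    // 8-bit dynamic fixed point with 4 fractional bits: step 1/16, range [-8, 7.9375]
    printf("%f\n", trim_to_fixed_point(3.14159f, 8, 4));     // prints 3.125000
    printf("%f\n", trim_to_fixed_point(100.0f,   8, 4));     // prints 7.937500 (saturated)
    return 0;
}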
f09d237947c2b8c2bce6e22378a6fdd90e9bb8e3.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;

#include <hip/hip_runtime.h>

__global__ void getValue(float *data) {
    data[0] = pow(data[1], data[2]);
    data[4] = min(data[1], data[2]);
    data[5] = max(data[1], data[2]);
    data[6] = ::max(data[1], data[2]);
    // data[7] = ::max(data[1], data[2]);
}

int main(int argc, char *argv[]) {
    int N = 1024;

    hipStream_t stream;
    hipStreamCreate(&stream);

    float *hostFloats1;
    hipHostMalloc((void **)&hostFloats1, N * sizeof(float), hipHostMallocPortable);

    float *deviceFloats1;
    hipMalloc((void **)&deviceFloats1, N * sizeof(float));

    hostFloats1[0] = 0;
    hostFloats1[1] = 3;
    hostFloats1[2] = 4.5f;
    hipMemcpyAsync(
        deviceFloats1,
        hostFloats1,
        N * sizeof(float),
        hipMemcpyHostToDevice,
        stream
    );
    // hipStreamSynchronize(stream);

    hipLaunchKernelGGL(getValue, dim3(1,1,1), dim3(32,1,1), 0, stream, deviceFloats1);

    hipMemcpyAsync(hostFloats1, deviceFloats1, N * sizeof(float), hipMemcpyDeviceToHost, stream);
    hipStreamSynchronize(stream);

    // and check the values...
    for(int i = 0; i < 7; i++) {
        cout << "hostFloats1[" << i << "]=" << hostFloats1[i] << endl;
    }
    // cout << hostFloats1[0] << endl;
    // cout << hostFloats1[1] << endl;
    // cout << hostFloats1[2] << endl;
    // cout << hostFloats1[4] << endl;
    // cout << hostFloats1[5] << endl;
    float diff = std::abs(hostFloats1[0] - 140.296);
    assert(diff < 0.01);
    assert(hostFloats1[4] == 3);
    assert(hostFloats1[5] == 4.5f);

    hipHostFree(hostFloats1);
    hipFree(deviceFloats1);
    hipStreamDestroy(stream);

    return 0;
}
f09d237947c2b8c2bce6e22378a6fdd90e9bb8e3.cu
#include <iostream> #include <memory> #include <cassert> using namespace std; #include <cuda.h> __global__ void getValue(float *data) { data[0] = pow(data[1], data[2]); data[4] = min(data[1], data[2]); data[5] = max(data[1], data[2]); data[6] = ::max(data[1], data[2]); // data[7] = std::max(data[1], data[2]); } int main(int argc, char *argv[]) { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1; cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE); CUdeviceptr deviceFloats1; cuMemAlloc(&deviceFloats1, N * sizeof(float)); hostFloats1[0] = 0; hostFloats1[1] = 3; hostFloats1[2] = 4.5f; cuMemcpyHtoDAsync( (CUdeviceptr)(((float *)deviceFloats1)), hostFloats1, N * sizeof(float), stream ); // cuStreamSynchronize(stream); getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats1)); cuMemcpyDtoHAsync(hostFloats1, deviceFloats1, N * sizeof(float), stream); cuStreamSynchronize(stream); // and check the values... for(int i = 0; i < 7; i++) { cout << "hostFloats1[" << i << "]=" << hostFloats1[i] << endl; } // cout << hostFloats1[0] << endl; // cout << hostFloats1[1] << endl; // cout << hostFloats1[2] << endl; // cout << hostFloats1[4] << endl; // cout << hostFloats1[5] << endl; float diff = std::abs(hostFloats1[0] - 140.296); assert(diff < 0.01); assert(hostFloats1[4] == 3); assert(hostFloats1[5] == 4.5f); cuMemFreeHost(hostFloats1); cuMemFree(deviceFloats1); cuStreamDestroy(stream); return 0; }
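// For reference only (not part of the dataset entry above): the same host flow written
// against the CUDA runtime API instead of the driver API. The <<<>>> launch in the
// original already goes through the runtime, so this variant keeps a single API family.
// Error checking is omitted to stay close to the original's style.
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void getValueRuntime(float *data)
{
    data[0] = powf(data[1], data[2]);
    data[4] = fminf(data[1], data[2]);
    data[5] = fmaxf(data[1], data[2]);
}

int main()
{
    const int N = 1024;
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    float *h = nullptr;
    float *d = nullptr;
    cudaHostAlloc((void **)&h, N * sizeof(float), cudaHostAllocPortable);
    cudaMalloc((void **)&d, N * sizeof(float));

    h[0] = 0.0f; h[1] = 3.0f; h[2] = 4.5f;
    cudaMemcpyAsync(d, h, N * sizeof(float), cudaMemcpyHostToDevice, stream);
    getValueRuntime<<<1, 32, 0, stream>>>(d);
    cudaMemcpyAsync(h, d, N * sizeof(float), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    printf("pow(3, 4.5) = %f\n", h[0]);        // ~140.296
    assert(std::abs(h[0] - 140.296f) < 0.01f);

    cudaFreeHost(h);
    cudaFree(d);
    cudaStreamDestroy(stream);
    return 0;
}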
f761bb201f080a1a7201dfaf8f793190bcd1f504.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void stencil_no_sync(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex+RADIUS]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE + RADIUS]; } ////////////////////////////// missing sync thread //////////////////////// // Apply the stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; }
f761bb201f080a1a7201dfaf8f793190bcd1f504.cu
#include "includes.h" __global__ void stencil_no_sync(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex+RADIUS]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE + RADIUS]; } ////////////////////////////// missing sync thread //////////////////////// // Apply the stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; }
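// The kernel above deliberately omits the barrier after the shared-memory loads (its
// name and the comment say as much). For contrast, a sketch of the corrected variant
// is shown here; BLOCK_SIZE and RADIUS come from includes.h in the original, so the
// values below are placeholders for a self-contained build, not the dataset's values.
#define RADIUS 3
#define BLOCK_SIZE 256

__global__ void stencil_with_sync(const int *in, int *out)
{
    __shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    int lindex = threadIdx.x + RADIUS;

    // Read input elements (plus halo) into shared memory
    temp[lindex] = in[gindex + RADIUS];
    if (threadIdx.x < RADIUS) {
        temp[lindex - RADIUS]     = in[gindex];
        temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE + RADIUS];
    }
    __syncthreads();  // every halo element must be visible before any thread reads temp

    // Apply the stencil
    int result = 0;
    for (int offset = -RADIUS; offset <= RADIUS; offset++)
        result += temp[lindex + offset];

    // Store the result
    out[gindex] = result;
}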
8ff0748d9b6f504859de3c0c3a59a4227605f2c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <cstdint> #define PEN_CLIP5 (5) #define TILE_SIDE (8) /* typename meaning : - B is for computing the Second Best Score. Its values are on enum FALSE(0)/TRUE(1). (sidenote: it's based on an enum instead of a bool in order to generalize its type from its Int value, with Int2Type meta-programming-template) */ /* //! Note from the bwa-gasal2 coder : I failed to understand it, so I copied it. //! You can say to me... You cheated not only the game, but yourself. You didn't grow. You didn't improve. You took a shortcut and gained nothing. You experienced a hollow victory. Nothing was risked and nothing was gained. It's sad that you don't know the difference. */ typedef struct { int32_t h, e; } eh_t; template <Bool B> __global__ void gasal_ksw_kernel(uint32_t *packed_query_batch, uint32_t *packed_target_batch, uint32_t *query_batch_lens, uint32_t *target_batch_lens, uint32_t *query_batch_offsets, uint32_t *target_batch_offsets, uint32_t *seed_score, gasal_res_t *device_res, gasal_res_t *device_res_second, int n_tasks) { const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;//thread ID if (tid >= n_tasks) return; uint32_t packed_target_batch_idx = target_batch_offsets[tid] >> 3; //starting index of the target_batch sequence uint32_t packed_query_batch_idx = query_batch_offsets[tid] >> 3;//starting index of the query_batch sequence uint32_t qlen = query_batch_lens[tid]; uint32_t tlen = target_batch_lens[tid]; uint32_t query_batch_regs = (qlen >> 3) + 1;//(qlen >> 3) + (qlen & 0b0111 ? 1 : 0);//number of 32-bit words holding query_batch sequence uint32_t target_batch_regs = (tlen >> 3) + 1;//(tlen >> 3) + (tlen & 0b0111 ? 1 : 0);//number of 32-bit words holding target_batch sequence uint32_t h0 = seed_score[tid]; int32_t subScore; uint32_t gpac, rpac, gbase, rbase; int zdrop = 0; int o_del = _cudaGapO; int o_ins = _cudaGapO; int e_del = _cudaGapExtend; int e_ins = _cudaGapExtend; eh_t eh[MAX_QUERY_LEN]; // score array int j, oe_del = o_del + e_del, oe_ins = o_ins + e_ins, beg, end, max, max_i, max_j, max_ie, gscore, max_off; for(int i = 0; i < MAX_QUERY_LEN; i++) { eh[i].h = 0; eh[i].e = 0; } // fill the first row eh[0].h = h0; eh[1].h = h0 > oe_ins ? 
h0 - oe_ins : 0; for (j = 2; j <= qlen && eh[j - 1].h > e_ins; ++j) eh[j].h = eh[j - 1].h - e_ins; // DP loop max = h0, max_i = max_j = -1; max_ie = -1, gscore = -1; max_off = 0; beg = 0, end = qlen; for (uint32_t target_tile_id = 0; target_tile_id < target_batch_regs; target_tile_id++) //target_batch sequence in rows { gpac = packed_target_batch[packed_target_batch_idx + target_tile_id];//load 8 packed bases from target_batch sequence for (uint32_t target_base_id = 0; target_base_id < TILE_SIDE; target_base_id++) { int i = target_tile_id * TILE_SIDE + target_base_id; if (i >= tlen) // skip padding break; gbase = (gpac >> (32 - (target_base_id+1)*4 )) & 0x0F; /* get a base from target_batch sequence */ int t, f = 0, h1, m = 0, mj = -1; // compute the first column if (beg == 0) { h1 = h0 - (o_del + e_del * (i + 1)); if (h1 < 0) h1 = 0; } else h1 = 0; for(uint32_t query_tile_id = 0; (query_tile_id < query_batch_regs); query_tile_id++) { rpac = packed_query_batch[packed_query_batch_idx + query_tile_id];//load 8 bases from query_batch sequence for(uint32_t query_base_id = 0; (query_base_id < TILE_SIDE); query_base_id++) { j = query_tile_id * TILE_SIDE + query_base_id; if (j < beg) continue; if (j >= end) break; rbase = (rpac >> (32 - (query_base_id+1)*4 )) & 0x0F;//get a base from query_batch sequence // At the beginning of the loop: eh[j] = { H(i-1,j-1), E(i,j) }, f = F(i,j) and h1 = H(i,j-1) // Similar to SSE2-SW, cells are computed in the following order: // H(i,j) = max{H(i-1,j-1)+S(i,j), E(i,j), F(i,j)} // E(i+1,j) = max{H(i,j)-gapo, E(i,j)} - gape // F(i,j+1) = max{H(i,j)-gapo, F(i,j)} - gape eh_t *p = &eh[j]; int h, M = p->h, e = p->e; // get H(i-1,j-1) and E(i-1,j) p->h = h1; // set H(i,j-1) for the next row subScore = DEV_GET_SUB_SCORE_LOCAL(rbase, gbase); M = M ? M + subScore : 0; // separating H and M to disallow a cigar like "100M3I3D20M" h = M > e ? M : e; // e and f are guaranteed to be non-negative, so h>=0 even if M<0 h = h > f ? h : f; h1 = h; // save H(i,j) to h1 for the next column mj = m > h ? mj : j; // record the position where max score is achieved m = m > h ? m : h; // m is stored at eh[mj+1] t = M - oe_del; t = t > 0 ? t : 0; e -= e_del; e = e > t ? e : t; // computed E(i+1,j) p->e = e; // save E(i+1,j) for the next row t = M - oe_ins; t = t > 0 ? t : 0; f -= e_ins; f = f > t ? f : t; // computed F(i,j+1) } } eh[end].h = h1; eh[end].e = 0; if (j == qlen) { max_ie = gscore > h1 ? max_ie : i; gscore = gscore > h1 ? gscore : h1; } if (m == 0) break; if (m > max) { max = m, max_i = i, max_j = mj; max_off = max_off > abs(mj - i) ? max_off : abs(mj - i); } else if (zdrop > 0) { if (i - max_i > mj - max_j) { if (max - m - ((i - max_i) - (mj - max_j)) * e_del > zdrop) break; } else { if (max - m - ((mj - max_j) - (i - max_i)) * e_ins > zdrop) break; } } /* This is defining from where to start the next row and where to end the computation of next row it skips some of the cells in the beginning and in the end of the row */ // update beg and end for the next round // COULD be done over a constant value... for (j = beg; (j < end) && eh[j].h == 0 && eh[j].e == 0; ++j) ; beg = j; for (j = end; (j >= beg) && eh[j].h == 0 && eh[j].e == 0; --j) ; end = j + 2 < qlen ? 
j + 2 : qlen; //beg = 0; end = qlen; // uncomment this line for debugging } } if (gscore <= 0 || gscore <= max - PEN_CLIP5) { device_res->aln_score[tid] = max; device_res->query_batch_end[tid] = max_j + 1; device_res->target_batch_end[tid] = max_i + 1; } else { device_res->aln_score[tid] = gscore; device_res->query_batch_end[tid] = qlen; device_res->target_batch_end[tid] = max_ie + 1; } }
8ff0748d9b6f504859de3c0c3a59a4227605f2c0.cu
#pragma once #include <cstdint> #define PEN_CLIP5 (5) #define TILE_SIDE (8) /* typename meaning : - B is for computing the Second Best Score. Its values are on enum FALSE(0)/TRUE(1). (sidenote: it's based on an enum instead of a bool in order to generalize its type from its Int value, with Int2Type meta-programming-template) */ /* //! Note from the bwa-gasal2 coder : I failed to understand it, so I copied it. //! You can say to me... You cheated not only the game, but yourself. You didn't grow. You didn't improve. You took a shortcut and gained nothing. You experienced a hollow victory. Nothing was risked and nothing was gained. It's sad that you don't know the difference. */ typedef struct { int32_t h, e; } eh_t; template <Bool B> __global__ void gasal_ksw_kernel(uint32_t *packed_query_batch, uint32_t *packed_target_batch, uint32_t *query_batch_lens, uint32_t *target_batch_lens, uint32_t *query_batch_offsets, uint32_t *target_batch_offsets, uint32_t *seed_score, gasal_res_t *device_res, gasal_res_t *device_res_second, int n_tasks) { const uint32_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;//thread ID if (tid >= n_tasks) return; uint32_t packed_target_batch_idx = target_batch_offsets[tid] >> 3; //starting index of the target_batch sequence uint32_t packed_query_batch_idx = query_batch_offsets[tid] >> 3;//starting index of the query_batch sequence uint32_t qlen = query_batch_lens[tid]; uint32_t tlen = target_batch_lens[tid]; uint32_t query_batch_regs = (qlen >> 3) + 1;//(qlen >> 3) + (qlen & 0b0111 ? 1 : 0);//number of 32-bit words holding query_batch sequence uint32_t target_batch_regs = (tlen >> 3) + 1;//(tlen >> 3) + (tlen & 0b0111 ? 1 : 0);//number of 32-bit words holding target_batch sequence uint32_t h0 = seed_score[tid]; int32_t subScore; uint32_t gpac, rpac, gbase, rbase; int zdrop = 0; int o_del = _cudaGapO; int o_ins = _cudaGapO; int e_del = _cudaGapExtend; int e_ins = _cudaGapExtend; eh_t eh[MAX_QUERY_LEN]; // score array int j, oe_del = o_del + e_del, oe_ins = o_ins + e_ins, beg, end, max, max_i, max_j, max_ie, gscore, max_off; for(int i = 0; i < MAX_QUERY_LEN; i++) { eh[i].h = 0; eh[i].e = 0; } // fill the first row eh[0].h = h0; eh[1].h = h0 > oe_ins ? 
h0 - oe_ins : 0; for (j = 2; j <= qlen && eh[j - 1].h > e_ins; ++j) eh[j].h = eh[j - 1].h - e_ins; // DP loop max = h0, max_i = max_j = -1; max_ie = -1, gscore = -1; max_off = 0; beg = 0, end = qlen; for (uint32_t target_tile_id = 0; target_tile_id < target_batch_regs; target_tile_id++) //target_batch sequence in rows { gpac = packed_target_batch[packed_target_batch_idx + target_tile_id];//load 8 packed bases from target_batch sequence for (uint32_t target_base_id = 0; target_base_id < TILE_SIDE; target_base_id++) { int i = target_tile_id * TILE_SIDE + target_base_id; if (i >= tlen) // skip padding break; gbase = (gpac >> (32 - (target_base_id+1)*4 )) & 0x0F; /* get a base from target_batch sequence */ int t, f = 0, h1, m = 0, mj = -1; // compute the first column if (beg == 0) { h1 = h0 - (o_del + e_del * (i + 1)); if (h1 < 0) h1 = 0; } else h1 = 0; for(uint32_t query_tile_id = 0; (query_tile_id < query_batch_regs); query_tile_id++) { rpac = packed_query_batch[packed_query_batch_idx + query_tile_id];//load 8 bases from query_batch sequence for(uint32_t query_base_id = 0; (query_base_id < TILE_SIDE); query_base_id++) { j = query_tile_id * TILE_SIDE + query_base_id; if (j < beg) continue; if (j >= end) break; rbase = (rpac >> (32 - (query_base_id+1)*4 )) & 0x0F;//get a base from query_batch sequence // At the beginning of the loop: eh[j] = { H(i-1,j-1), E(i,j) }, f = F(i,j) and h1 = H(i,j-1) // Similar to SSE2-SW, cells are computed in the following order: // H(i,j) = max{H(i-1,j-1)+S(i,j), E(i,j), F(i,j)} // E(i+1,j) = max{H(i,j)-gapo, E(i,j)} - gape // F(i,j+1) = max{H(i,j)-gapo, F(i,j)} - gape eh_t *p = &eh[j]; int h, M = p->h, e = p->e; // get H(i-1,j-1) and E(i-1,j) p->h = h1; // set H(i,j-1) for the next row subScore = DEV_GET_SUB_SCORE_LOCAL(rbase, gbase); M = M ? M + subScore : 0; // separating H and M to disallow a cigar like "100M3I3D20M" h = M > e ? M : e; // e and f are guaranteed to be non-negative, so h>=0 even if M<0 h = h > f ? h : f; h1 = h; // save H(i,j) to h1 for the next column mj = m > h ? mj : j; // record the position where max score is achieved m = m > h ? m : h; // m is stored at eh[mj+1] t = M - oe_del; t = t > 0 ? t : 0; e -= e_del; e = e > t ? e : t; // computed E(i+1,j) p->e = e; // save E(i+1,j) for the next row t = M - oe_ins; t = t > 0 ? t : 0; f -= e_ins; f = f > t ? f : t; // computed F(i,j+1) } } eh[end].h = h1; eh[end].e = 0; if (j == qlen) { max_ie = gscore > h1 ? max_ie : i; gscore = gscore > h1 ? gscore : h1; } if (m == 0) break; if (m > max) { max = m, max_i = i, max_j = mj; max_off = max_off > abs(mj - i) ? max_off : abs(mj - i); } else if (zdrop > 0) { if (i - max_i > mj - max_j) { if (max - m - ((i - max_i) - (mj - max_j)) * e_del > zdrop) break; } else { if (max - m - ((mj - max_j) - (i - max_i)) * e_ins > zdrop) break; } } /* This is defining from where to start the next row and where to end the computation of next row it skips some of the cells in the beginning and in the end of the row */ // update beg and end for the next round // COULD be done over a constant value... for (j = beg; (j < end) && eh[j].h == 0 && eh[j].e == 0; ++j) ; beg = j; for (j = end; (j >= beg) && eh[j].h == 0 && eh[j].e == 0; --j) ; end = j + 2 < qlen ? 
j + 2 : qlen; //beg = 0; end = qlen; // uncomment this line for debugging } } if (gscore <= 0 || gscore <= max - PEN_CLIP5) { device_res->aln_score[tid] = max; device_res->query_batch_end[tid] = max_j + 1; device_res->target_batch_end[tid] = max_i + 1; } else { device_res->aln_score[tid] = gscore; device_res->query_batch_end[tid] = qlen; device_res->target_batch_end[tid] = max_ie + 1; } }
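// Scalar illustration (not part of gasal_ksw_kernel) of the per-cell update its inner
// loop performs, following the H/E/F recurrence spelled out in the comments above.
// gap_open, gap_extend and sub_score are placeholders for _cudaGapO, _cudaGapExtend
// and DEV_GET_SUB_SCORE_LOCAL, which are defined elsewhere in GASAL2.
#include <cstdio>

static inline int ksw_cell_update(int M_diag, int e, int f, int sub_score,
                                  int gap_open, int gap_extend,
                                  int *e_next, int *f_next)
{
    const int oe = gap_open + gap_extend;          // cost of opening plus extending a gap
    int M = M_diag ? M_diag + sub_score : 0;       // same "H vs M" separation as the kernel
    int h = M > e ? M : e;                         // H(i,j) = max{M, E(i,j), F(i,j)}
    h = h > f ? h : f;

    int t = M - oe;  t = t > 0 ? t : 0;
    e -= gap_extend; *e_next = e > t ? e : t;      // E(i+1,j)

    t = M - oe;      t = t > 0 ? t : 0;
    f -= gap_extend; *f_next = f > t ? f : t;      // F(i,j+1)

    return h;                                      // becomes h1 / eh[j].h for the next row
}

int main()
{
    int e_next = 0, f_next = 0;
    // one match cell starting from H(i-1,j-1)=30, E=F=0, score +2, gaps 6/1 (illustrative values)
    int h = ksw_cell_update(30, 0, 0, 2, 6, 1, &e_next, &f_next);
    printf("h=%d e_next=%d f_next=%d\n", h, e_next, f_next);  // h=32 e_next=25 f_next=25
    return 0;
}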
4050c735784b8fc29c04a08c3e758b94d2f9e7d0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 10
__global__ void addvec(int *a, int *b, int *c)
{
    int tid=threadIdx.x; // Handle all the data with this index
    if(tid<N)
    {
        c[tid]=a[tid]+b[tid];
    }
}

int main(void)
{
    int a[N], b[N], c[N],i;
    int *dev_a, *dev_b, *dev_c;

    //Allocate space in GPU memory
    hipMalloc((void**)&dev_a,N*sizeof(int));
    hipMalloc((void**)&dev_b,N*sizeof(int));
    hipMalloc((void**)&dev_c,N*sizeof(int));

    //Initialize the original data on the CPU
    for (i = 0; i < N; i++)
    {
        a[i]=i*2;
        b[i]=i*2+1;
    }

    //Copy the vectors to the GPU
    hipMemcpy(dev_a,a,N*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(dev_b,b,N*sizeof(int),hipMemcpyHostToDevice);

    //Launch the kernel
    hipLaunchKernelGGL(( addvec), dim3(1),dim3(N), 0, 0, dev_a,dev_b,dev_c);

    //Retrieve the results from the GPU
    hipMemcpy(c,dev_c,N*sizeof(int),hipMemcpyDeviceToHost);

    //Print the result
    for (i = 0; i < N; i++)
    {
        printf("%d+%d=%d\n",a[i],b[i],c[i]);
    }

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
}
4050c735784b8fc29c04a08c3e758b94d2f9e7d0.cu
#include <stdio.h>
#define N 10
__global__ void addvec(int *a, int *b, int *c)
{
    int tid=threadIdx.x; // Handle all the data with this index
    if(tid<N)
    {
        c[tid]=a[tid]+b[tid];
    }
}

int main(void)
{
    int a[N], b[N], c[N],i;
    int *dev_a, *dev_b, *dev_c;

    //Allocate space in GPU memory
    cudaMalloc((void**)&dev_a,N*sizeof(int));
    cudaMalloc((void**)&dev_b,N*sizeof(int));
    cudaMalloc((void**)&dev_c,N*sizeof(int));

    //Initialize the original data on the CPU
    for (i = 0; i < N; i++)
    {
        a[i]=i*2;
        b[i]=i*2+1;
    }

    //Copy the vectors to the GPU
    cudaMemcpy(dev_a,a,N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b,N*sizeof(int),cudaMemcpyHostToDevice);

    //Launch the kernel
    addvec<<<1,N>>>(dev_a,dev_b,dev_c);

    //Retrieve the results from the GPU
    cudaMemcpy(c,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost);

    //Print the result
    for (i = 0; i < N; i++)
    {
        printf("%d+%d=%d\n",a[i],b[i],c[i]);
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
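// The pair above launches addvec as one block of N threads, which is fine for N = 10
// but stops working once N exceeds the per-block thread limit. A sketch of the usual
// multi-block variant is shown here for reference; it is an illustration, not part of
// the dataset entry, and the values below (NUM, 256 threads) are arbitrary choices.
#include <cstdio>

#define NUM 100000  // deliberately larger than one block can cover

__global__ void addvec_big(const int *a, const int *b, int *c, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;  // global index across all blocks
    if (tid < n)
        c[tid] = a[tid] + b[tid];
}

int main()
{
    int *a, *b, *c;
    cudaMallocManaged(&a, NUM * sizeof(int));  // managed memory keeps the host code short
    cudaMallocManaged(&b, NUM * sizeof(int));
    cudaMallocManaged(&c, NUM * sizeof(int));
    for (int i = 0; i < NUM; i++) { a[i] = i * 2; b[i] = i * 2 + 1; }

    const int threads = 256;
    const int blocks  = (NUM + threads - 1) / threads;  // round up so every element is covered
    addvec_big<<<blocks, threads>>>(a, b, c, NUM);
    cudaDeviceSynchronize();

    printf("%d+%d=%d\n", a[NUM - 1], b[NUM - 1], c[NUM - 1]);

    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}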
b22bd964266fdd23119d5aeba6cc7f654df2b7bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "LBM_GPU.cuh" ifstream fin_GPU("in_GPU.txt"); ofstream fout_GPU("out_GPU.dat"); ofstream fout_GPU_Cd("out_GPU_Cd.dat"); ofstream fout_GPU_Ux0("out_GPU_Ux0.dat"); ofstream fout_GPU_Ux("out_GPU_Ux.dat"); LBM_GPU::LBM_GPU() { // ============================================================================ // // LOAD THE PARAMETERS // ============================================================================ // fin_GPU >> nx; fin_GPU >> comment; fin_GPU >> ny; fin_GPU >> comment; fin_GPU >> Lx; fin_GPU >> comment; fin_GPU >> Ly; fin_GPU >> comment; fin_GPU >> a; fin_GPU >> comment; fin_GPU >> rho1; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_X; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_Y; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_Z; fin_GPU >> comment; fin_GPU >> D; fin_GPU >> comment; fin_GPU >> Um_p; fin_GPU >> comment; fin_GPU >> tau; fin_GPU >> comment; fin_GPU >> nu_p; fin_GPU >> comment; // ============================================================================ // // ============================================================================ // // NEW & CUDAMALLOC // ============================================================================ // is_boundary_node = new int[nx*ny]; hipMalloc((void**)&d_is_boundary_node, nx*ny * sizeof(int)); is_solid_node = new int[nx*ny]; hipMalloc((void**)&d_is_solid_node, nx*ny * sizeof(int)); is_solid_near_node = new int[nx*ny]; U = new float[nx*ny]; hipMalloc((void**)&d_U, nx*ny * sizeof(float)); Ux = new float[nx*ny]; hipMalloc((void**)&d_Ux, nx*ny * sizeof(float)); Uy = new float[nx*ny]; hipMalloc((void**)&d_Uy, nx*ny * sizeof(float)); rho = new float[nx*ny]; hipMalloc((void**)&d_rho, nx*ny * sizeof(float)); UN = new float[nx*ny]; hipMalloc((void**)&d_UN, nx*ny * sizeof(float)); UxN = new float[nx*ny]; hipMalloc((void**)&d_UxN, nx*ny * sizeof(float)); UyN = new float[nx*ny]; hipMalloc((void**)&d_UyN, nx*ny * sizeof(float)); rhoN = new float[nx*ny]; hipMalloc((void**)&d_rhoN, nx*ny * sizeof(float)); f = new float[nx*ny*a]; hipMalloc((void**)&d_f, nx*ny*a * sizeof(float)); ftemp = new float[nx*ny*a]; hipMalloc((void**)&d_ftemp, nx*ny*a * sizeof(float)); fN = new float[nx*ny*a]; hipMalloc((void**)&d_fN, nx*ny*a * sizeof(float)); feq = new float[nx*ny*a]; hipMalloc((void**)&d_feq, nx*ny*a * sizeof(float)); ex = new float[a]; hipMalloc((void**)&d_ex, a * sizeof(float)); ey = new float[a]; hipMalloc((void**)&d_ey, a * sizeof(float)); U_p = new float[nx*ny]; Ux_p = new float[nx*ny]; Uy_p = new float[nx*ny]; P = new float[nx*ny]; Ux0_p = new float[ny]; Ux0 = new float[ny]; hipMalloc((void**)&d_Ux0, ny * sizeof(float)); // ============================================================================ // // ============================================================================ // // MICROSCOPIC VELOCITY // ============================================================================ // ex[0] = 0.0, ey[0] = 0.0; ex[1] = 1.0, ey[1] = 0.0; ex[2] = 0.0, ey[2] = 1.0; ex[3] = -1.0, ey[3] = 0.0; ex[4] = 0.0, ey[4] = -1.0; ex[5] = 1.0, ey[5] = 1.0; ex[6] = -1.0, ey[6] = 1.0; ex[7] = -1.0, ey[7] = -1.0; ex[8] = 1.0, ey[8] = -1.0; hipMemcpy(d_ex, ex, a * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_ey, ey, a * sizeof(float), hipMemcpyHostToDevice); // ============================================================================ // // ============================================================================ // // SET BOUNDARY NODE // 
============================================================================ // sIm = nx / Lx * 0.15; sIM = nx / Lx * (0.15 + D) - 1; sJm = ny / Ly * 0.15; sJM = ny / Ly * (0.15 + D) - 1; snx = (sIM - sIm) + 1; sny = (sJM - sJm) + 1; sn = 0; ic = (float)sIm + ((float)sIM - (float)sIm) / 2; jc = (float)sJm + ((float)sJM - (float)sJm) / 2; r = ((float)sIM - (float)sIm) / 2; cout << "sIm = " << sIm << endl; cout << "sIM = " << sIM << endl; cout << "sJm = " << sJm << endl; cout << "sJM = " << sJM << endl; cout << "snx = " << snx << endl; cout << "sny = " << sny << endl; cout << "ic = " << ic << endl; cout << "jc = " << jc << endl; cout << "r = " << r << endl; //set boundary node for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1) is_boundary_node[i + nx*j] = 1; else is_boundary_node[i + nx*j] = 0; } } //Binary data /*for (i = 0; i < nx; i++) { for (j = ny - 1; j > -1; j--) { if ((i >= sIm && i <= sIM) && (j >= sJm && j <= sJM)) fin_grid_GPU >> is_solid_node[i + nx*j]; else is_solid_node[i + nx*j] = 0; if (is_solid_node[i + nx*j]) sn = sn + 1; } }*/ //set solid node for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { dist = sqrt(pow((float)i - ic, 2) + pow((float)j - jc, 2)); if (dist <= r) is_solid_node[i + nx*j] = 1; else is_solid_node[i + nx*j] = 0; if (is_solid_node[i + nx*j]) sn = sn + 1; } } //set near solid node for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { is_solid_near_node[i + nx*j] = 0; in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; if (!is_boundary_node[i + nx*j]) { if (!is_solid_node[i + nx*j]) { if (is_solid_node[ip + nx*j]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[i + nx*jp]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[in + nx*j]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[i + nx*jn]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[ip + nx*jp]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[in + nx*jp]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[in + nx*jn]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[ip + nx*jn]) { is_solid_near_node[i + nx*j] = 1; } } } } } hipMemcpy(d_is_boundary_node, is_boundary_node, nx*ny * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_is_solid_node, is_solid_node, nx*ny * sizeof(int), hipMemcpyHostToDevice); // ============================================================================ // // ============================================================================ // // SET PARAMETERS & INITIAL CONDITION // ============================================================================ // del_x = 1.0; del_y = 1.0; del_t = 1.0; c = del_y / del_t; c_s = (1.0 / sqrt(3.0))*c; del_x_p = D / (float)snx; del_y_p = D / (float)sny; // del_t_p = pow(del_y_p, 2); // del_t_p = 0.000013; //Uniform /*nu_p = 0.06 * (del_y_p / del_t_p) * D / Re; nu = (del_t_p / pow(del_y_p, 2))*nu_p; tau = (1.0 / pow(c_s, 2))*nu + (0.5*del_t);*/ //Input Reynolds number and del_t //Um = Um_p * (del_t_p / del_y_p); //nu_p = (2.0 / 3.0) * Um_p * D / Re; //nu = (del_t_p / pow(del_y_p, 2))*nu_p; //tau = (1.0 / pow(c_s, 2))*nu + (0.5*del_t); //Input tau and kinematic viscosity del_t_p = pow(c_s, 2)*(tau - 0.5)*pow(del_y_p, 2) / nu_p; Re = (2.0 / 3.0) * Um_p * D / nu_p; Um = Um_p * del_t_p / del_y_p; nu = nu_p * del_t_p / pow(del_y_p, 2); cout << endl; cout << "// =================== Stability condition ================ //" << endl; cout << "Check 1. 
[tau > 0.5]" << endl; cout << "tau = " << tau << endl; cout << "Check 2. Mach number condition [Ma = Uavg/c_s << 1]" << endl; cout << "Ma = " << (2.0/3.0)*Um/c_s << endl; cout << "Check 3. BGK Stability. [If tau < 0.55, tau > 0.5 + 0.125*Uavg]" << endl; cout << "tau = " << tau << " > " << 0.5 + 0.125*(2.0 / 3.0)*Um << endl; cout << "// ======================================================== //" << endl; //intitalize variables for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { Ux[i + nx*j] = 0.0; Uy[i + nx*j] = 0.0; U[i + nx*j] = 0.0; UxN[i + nx*j] = 0.0; UyN[i + nx*j] = 0.0; UN[i + nx*j] = 0.0; P[i + nx*j] = 0.0; for (k = 0; k < a; k++) { ftemp[i + nx*j + nx*ny*k] = 0.0; feq[i + nx*j + nx*ny*k] = 0.0; fN[i + nx*j + nx*ny*k] = 0.0; } if (!is_solid_node[i + nx*j]) rho[i + nx*j] = 1.0; else rho[i + nx*j] = 1.0; f[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j]; } } hipMemcpy(d_rho, rho, nx*ny * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_f, f, nx*ny*a * sizeof(float), hipMemcpyHostToDevice); //set velocity profile at inlet for (j = 0; j < ny; j++) { Ux0_p[j] = 4.0*Um_p / (pow(Ly, 2))*(((float)j + 1) - 0.5)*del_y_p*(Ly - (((float)j + 1) - 0.5)*del_y_p); // Ux0[j] = 4.0*Um / (pow(ny, 2))*(((float)j + 1) - 0.5)*del_y*(ny - (((float)j + 1) - 0.5)*del_y); Ux0[j] = Ux0_p[j] * (del_t_p / del_y_p); fout_GPU_Ux0 << Ux0[j] << endl; } hipMemcpy(d_Ux0, Ux0, ny * sizeof(float), hipMemcpyHostToDevice); // ============================================================================ // } __global__ void Kernel_Streaming(float* f, float* ftemp, int* is_boundary_node, int* is_solid_node, int nx, int ny, int a, float ic, float jc, float r) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; int in, ip, jn, jp; in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; float dist = sqrt(pow((float)i - ic, 2) + pow((float)j - jc, 2)); float q = dist - r; if (!is_boundary_node[i + nx*j]) { if (!is_solid_node[i + nx*j]) { ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; if (!is_solid_node[ip + nx*j]) ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 3] = 2.0 * q * f[i + nx*j + nx*ny * 1] + (1.0 - 2.0*q)*f[(i - 1) + nx*j + nx*ny * 1]; else ftemp[i + nx*j + nx*ny * 3] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 1] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 3]; } if (!is_solid_node[i + nx*jp]) ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 4] = 2.0 * q * f[i + nx*j + nx*ny * 2] + (1.0 - 2.0*q)*f[i + nx*(j - 1) + nx*ny * 2]; else ftemp[i + nx*j + nx*ny * 4] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 2] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 4]; } if (!is_solid_node[in + nx*j]) ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 1] = 2.0 * q * f[i + nx*j + nx*ny * 3] + (1.0 - 2.0*q)*f[(i + 1) + nx*j + nx*ny * 3]; else ftemp[i + nx*j + nx*ny * 1] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 3] + 
(2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 1]; } if (!is_solid_node[i + nx*jn]) ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 2] = 2.0 * q * f[i + nx*j + nx*ny * 4] + (1.0 - 2.0*q)*f[i + nx*(j + 1) + nx*ny * 4]; else ftemp[i + nx*j + nx*ny * 2] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 4] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 2]; } if (!is_solid_node[ip + nx*jp]) ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 7] = 2.0 * q * f[i + nx*j + nx*ny * 5] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j - 1) + nx*ny * 5]; else ftemp[i + nx*j + nx*ny * 7] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 5] + (2.0*q - 1) / (2.0*q)*f[i + nx*j + nx*ny * 7]; } if (!is_solid_node[in + nx*jp]) ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 8] = 2.0 * q * f[i + nx*j + nx*ny * 6] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j - 1) + nx*ny * 6]; else ftemp[i + nx*j + nx*ny * 8] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 6] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 8]; } if (!is_solid_node[in + nx*jn]) ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 5] = 2.0 * q * f[i + nx*j + nx*ny * 7] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j + 1) + nx*ny * 7]; else ftemp[i + nx*j + nx*ny * 5] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 7] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 5]; } if (!is_solid_node[ip + nx*jn]) ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 6] = 2.0 * q * f[i + nx*j + nx*ny * 8] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j + 1) + nx*ny * 8]; else ftemp[i + nx*j + nx*ny * 6] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 8] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 6]; } } } else { if ((i == 0) && (j > 0 && j < ny - 1)) { //INLET ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i > 0 && i < nx - 1) && (j == ny - 1)) { //TOP ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i > 0 && i < nx - 1) && (j == 0)) { //BOTTOM ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; } else if ((i == nx - 1) && (j > 0 && j < ny - 1)) { //OUTLET ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; } else if ((i == 0) && (j == 0)) { //BOTTOM-LEFT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 
1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; } else if ((i == 0) && (j == ny - 1)) { //TOP-LEFT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i == nx - 1) && (j == ny - 1)) { //TOP-RIGHT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; } else if ((i == nx - 1) && (j == 0)) { //BOTTOM-RIGHT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; } } } void LBM_GPU::Streaming() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Streaming << < dimGrid, dimBlock >> > (d_f, d_ftemp, d_is_boundary_node, d_is_solid_node, nx, ny, a, ic, jc, r); } __global__ void Kernel_BC_bounceback(float* f, float* ftemp, float* rho, float* Ux, float* Uy, float* Ux0, float rho1, int nx, int ny, int a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; float rho0, ru, Ux1, Uy1, rho_extra, Ux_extra, Uy_extra; // ============================================================================ // // TOP BOUNDARY (HALF-AWAY BOUNCEBACK) // ============================================================================ // if ((i > 0 && i < nx - 1) && (j == ny - 1)){ //Bounce-back boundary ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; //Periodic boundary /*ftemp[i + nx*0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i + 1) + nx*0 + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[(i - 1) + nx*0 + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; */ //Velocity boundary(first order) /*rho_extra = rho[i + nx*(j - 1)] + 0.5 * (rho[i + nx*(j - 1)] - rho[i + nx*(j - 2)]); Ux_extra = Ux[i + nx*(j - 1)]; ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j - 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j - 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j - 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j - 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j - 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j - 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j - 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j - 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j - 1) + nx*ny * 8]; */ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j - 1) + nx*ny * 0] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 0] - ftemp[i + nx*(j - 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j - 1) 
+ nx*ny * 1] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 1] - ftemp[i + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j - 1) + nx*ny * 2] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 2] - ftemp[i + nx*(j - 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 3] - ftemp[i + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j - 1) + nx*ny * 4] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 4] - ftemp[i + nx*(j - 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 5] - ftemp[i + nx*(j - 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 6] - ftemp[i + nx*(j - 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j - 1) + nx*ny * 7] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 7] - ftemp[i + nx*(j - 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j - 1) + nx*ny * 8] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 8] - ftemp[i + nx*(j - 2) + nx*ny * 8]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 4] = 2.0 * ftemp[i + nx*(j - 1) + nx*ny * 4] - ftemp[i + nx*(j - 2) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 7] = 2.0 * ftemp[i + nx*(j - 1) + nx*ny * 7] - ftemp[i + nx*(j - 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = 2.0 * ftemp[i + nx*(j - 1) + nx*ny * 8] - ftemp[i + nx*(j - 2) + nx*ny * 8]; */ //Equilibrium /*float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); */ //NEBB method /*ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = 
ftemp[i + nx*j + nx*ny * 5] + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 1] - ftemp[i + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 1] - ftemp[i + nx*j + nx*ny * 3]); */ } // ============================================================================ // // ============================================================================ // // BOTTOM BOUNDARY (HALF-AWAY BOUNCEBACK) // ============================================================================ // if ((i > 0 && i < nx - 1) && (j == 0)){ //Bounce-back boundary ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; //Periodic boundary /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i - 1) + nx*(ny - 1) + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; ftemp[(i + 1) + nx*(ny - 1) + nx*ny * 8] = f[i + nx*j + nx*ny * 8];*/ //Velocity boundary(first order) /*rho_extra = rho[i + nx*(j + 1)] + 0.5 * (rho[i + nx*(j + 1)] - rho[i + nx*(j + 2)]); Ux_extra = Ux[i + nx*(j + 1)]; ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru;*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j + 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j + 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j + 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j + 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j + 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j + 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j + 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j + 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j + 1) + nx*ny * 8]; */ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j + 1) + nx*ny * 0] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 0] - ftemp[i + nx*(j + 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 1] - ftemp[i + nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j + 1) + nx*ny * 2] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 2] - ftemp[i + nx*(j + 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 3] - ftemp[i + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j + 1) + nx*ny * 4] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 4] - ftemp[i + nx*(j + 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j + 1) + nx*ny * 5] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 5] - ftemp[i + nx*(j + 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j + 1) + nx*ny * 6] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 6] - ftemp[i + nx*(j + 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 7] - ftemp[i + nx*(j + 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 8] - ftemp[i + nx*(j + 2) + nx*ny * 8]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 2] = 2.0 * ftemp[i + nx*(j + 1) + nx*ny * 2] - ftemp[i + nx*(j + 2) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 5] = 2.0 * ftemp[i + nx*(j + 1) + nx*ny * 5] - ftemp[i + nx*(j + 2) 
+ nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 2.0 * ftemp[i + nx*(j + 1) + nx*ny * 6] - ftemp[i + nx*(j + 2) + nx*ny * 6]; */ //Equilibrium /*float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); */ //NEBB method /*ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 3] - ftemp[i + nx*j + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 3] - ftemp[i + nx*j + nx*ny * 1]); */ } // ============================================================================ // // ============================================================================ // // LEFT BOUNDARY (VELOCITY) // ============================================================================ // if ((i == 0) && (j > 0 && j < ny - 1)) { /*rho0 = rho[(i + 1) + nx*j] + 0.5*(rho[(i + 1) + nx*j] - rho[(i + 2) + nx*j]); ru = rho0 * Ux0; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ /*rho0 = ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] + 2.0*(ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 6]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ //Zou - He boundary rho0 = 
(ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] + 2.0*(ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7])) / (1.0 - Ux0[j]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); //wet-node method // rho0 = (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] // + 2.0*(ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7])) / (1.0 - Ux0[j]); // ru = rho0 * Ux0[j]; //// ru = rho0 * 0.06; // // ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); //Equilibrium /*float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4)) * pow(Ux0[j], 2) - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4))*pow(Ux0[j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); */ } // ============================================================================ // // ============================================================================ // // RIGHT BOUNDARY (EXTRAPOLATION) // ============================================================================ // if ((i == nx - 1) && (j > 0 && j < ny - 1)) { //Extrapolation // ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*j + nx*ny * 0] + 0.5 * (ftemp[(i 
- 1) + nx*j + nx*ny * 0] - ftemp[(i - 2) + nx*j + nx*ny * 0]); // ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*j + nx*ny * 1] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 1] - ftemp[(i - 2) + nx*j + nx*ny * 1]); // ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*j + nx*ny * 2] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 2] - ftemp[(i - 2) + nx*j + nx*ny * 2]); // ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]); // ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*j + nx*ny * 4] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 4] - ftemp[(i - 2) + nx*j + nx*ny * 4]); // ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*j + nx*ny * 5] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 5] - ftemp[(i - 2) + nx*j + nx*ny * 5]); // ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]); // ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7]); // ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*j + nx*ny * 8] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 8] - ftemp[(i - 2) + nx*j + nx*ny * 8]); //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 0] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 0] + ftemp[(i - 3) + nx*j + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 1] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 1] + ftemp[(i - 3) + nx*j + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 2] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 2] + ftemp[(i - 3) + nx*j + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 3] + ftemp[(i - 3) + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 4] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 4] + ftemp[(i - 3) + nx*j + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 5] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 5] + ftemp[(i - 3) + nx*j + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 6] + ftemp[(i - 3) + nx*j + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 7] + ftemp[(i - 3) + nx*j + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 8] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 8] + ftemp[(i - 3) + nx*j + nx*ny * 8]); */ //Extrapolation type2 /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7];*/ //Extrapolation first order ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*j + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6]; 
ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*j + nx*ny * 8]; //Extrapolation second order /*ftemp[i + nx*j + nx*ny * 0] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 0] - ftemp[(i - 2) + nx*j + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 1] - ftemp[(i - 2) + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 2] - ftemp[(i - 2) + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 4] - ftemp[(i - 2) + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 5] - ftemp[(i - 2) + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 8] - ftemp[(i - 2) + nx*j + nx*ny * 8];*/ //Velocity boundary (first order) /*rho_extra = rho[(i - 1) + nx*j] + 0.5*(rho[(i - 1) + nx*j] - rho[(i - 2) + nx*j]); Ux_extra = Ux[(i - 1) + nx*j]; Uy_extra = Uy[(i - 1) + nx*j]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0)*rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*rho_extra*(Ux_extra - Uy_extra); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*rho_extra*(Ux_extra + Uy_extra);*/ //Pressure boundary /*Ux1 = Ux[(i - 1) + nx*j] + 0.5*(Ux[(i - 1) + nx*j] - Ux[(i - 2) + nx*j]); Uy1 = Uy[(i - 1) + nx*j] + 0.5*(Uy[(i - 1) + nx*j] - Uy[(i - 2) + nx*j]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); */ //wet-node method /*Ux1 = -1.0 + (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] + 2.0*(ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 8])) / rho1; ru = rho1 * Ux1; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru -(1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru +(1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); */ } // ============================================================================ // // ============================================================================ // // TOP-LEFT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == 0) && (j == ny - 1)) { //case 1 // ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; //// rho0 = rho[(i + 1) + nx*(j - 1)] + 0.5*(rho[(i + 1) + nx*(j - 1)] - rho[(i + 2) + nx*(j - 2)]); // rho0 = rho[i + nx*(j - 1)]; // ru = rho0 * Ux0[j]; // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + 
nx*ny * 7] + (1.0 / 6.0)*ru; // ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; //case 2 //ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; //ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; //ftemp[i + nx*j + nx*ny * 5] = -ftemp[i + nx*j + nx*ny * 7]; //rho0 = rho[(i + 1) + nx*(j - 1)] + 0.5*(rho[(i + 1) + nx*(j - 1)] - rho[(i + 2) + nx*(j - 2)]); //ru = rho0 * Ux0; //ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; //ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; //ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6];*/ //case 4 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; rho0 = rho[(i + 0) + nx*(j - 1)] + 0.5*(rho[(i + 0) + nx*(j - 1)] - rho[(i + 0) + nx*(j - 2)]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; */ //case 5 //// rho0 = rho[(i + 0) + nx*(j - 1)] + 0.5*(rho[(i + 0) + nx*(j - 1)] - rho[(i + 0) + nx*(j - 2)]); // rho0 = rho[i + nx*(j - 1)]; //// rho0 = 1.0; // ru = rho0 * Ux0[j]; //// ru = rho0* 0.005; // // ftemp[i + nx*j + nx*ny * 7] = -(1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 5] = (1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; // // ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //Periodic + Velocity /*ftemp[i + nx*0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i + 1) + nx*0 + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; rho0 = rho[(i + 1) + nx*(j - 1)] + 0.5*(rho[(i + 1) + nx*(j - 1)] - rho[(i + 2) + nx*(j - 2)]); ru = rho0 * Ux0; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ //wet-node method // rho0 = rho[i + nx*(j - 1)]; //// rho0 = 1.001; //// rho0 = rho[(i + 0) + nx*(j - 1)] + 0.5*(rho[(i + 0) + nx*(j - 1)] - rho[(i + 0) + nx*(j - 2)]); //// rho0 = rho1; // // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; // ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; // ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); // ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; 
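		// The active branch below replaces the corner node with the local D2Q9 equilibrium
		// distribution, feq_q = w_q * rho * (1 + 3*(e_q.u)/c^2 + 4.5*(e_q.u)^2/c^4 - 1.5*|u|^2/c^2),
		// with weights w_0 = 4/9, w_1..4 = 1/9 and w_5..8 = 1/36; the nine ftemp assignments spell
		// this formula out term by term. The same expression is repeated at the other corners and in
		// Kernel_Eq, so a shared __device__ helper (e.g. a hypothetical feq_d2q9(q, rho, ux, uy),
		// not part of this code) would be one way to avoid the duplication.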
//Equilibrium float c = 1; /*ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4)) * pow(Ux0[j], 2) - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4))*pow(Ux0[j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); */ ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 
2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // // ============================================================================ // // BOTTOM-LEFT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == 0) && (j == 0)) { //case 1 // ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; //// rho0 = rho[(i + 1) + nx*(j + 1)] + 0.5*(rho[(i + 1) + nx*(j + 1)] - rho[(i + 2) + nx*(j + 2)]); // rho0 = rho[i + nx*(j + 1)]; // ru = rho0 * Ux0[j]; // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; //case 2 //ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; //ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; //ftemp[i + nx*j + nx*ny * 8] = -ftemp[i + nx*j + nx*ny * 6]; //rho0 = rho[(i + 1) + nx*(j + 1)] + 0.5*(rho[(i + 1) + nx*(j + 1)] - rho[(i + 2) + nx*(j + 2)]); //ru = rho0 * Ux0; //ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; //ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // //ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; */ //case 4 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; rho0 = rho[(i + 0) + nx*(j + 1)] + 0.5*(rho[(i + 0) + nx*(j + 1)] - rho[(i + 0) + nx*(j + 2)]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //case 5 //// rho0 = rho[(i + 0) + nx*(j + 1)] + 0.5*(rho[(i + 0) + nx*(j + 1)] - rho[(i + 0) + nx*(j + 2)]); // rho0 = rho[i + nx*(j + 1)]; //// rho0 = 1.0; // ru = rho0 * Ux0[j]; //// ru = rho0* 0.005; // // ftemp[i + nx*j + nx*ny * 6] = -(1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 8] = (1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // // ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //Periodic + Velocity /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i + 1) + nx*(ny - 1) + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; rho0 = rho[(i + 1) + nx*(j + 1)] + 0.5*(rho[(i 
+ 1) + nx*(j + 1)] - rho[(i + 2) + nx*(j + 2)]); ru = rho0 * Ux0; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //wet-node method // rho0 = rho[i + nx*(j + 1)]; //// rho0 = 1.001; //// rho0 = rho[(i + 0) + nx*(j + 1)] + 0.5*(rho[(i + 0) + nx*(j + 1)] - rho[(i + 0) + nx*(j + 2)]); //// rho0 = rho1; // // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; // ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; // ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; //Equilibrium float c = 1; /*ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4)) * pow(Ux0[j], 2) - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4))*pow(Ux0[j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); */ ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i 
+ nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // // ============================================================================ // // TOP-RIGHT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == nx - 1) && (j == ny - 1)) { //case 1 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; Ux1 = Ux[(i - 1) + nx*(j - 1)] + 0.5*(Ux[(i - 1) + nx*(j - 1)] - Ux[(i - 2) + nx*(j - 2)]); Uy1 = Uy[(i - 1) + nx*(j - 1)] + 0.5*(Uy[(i - 1) + nx*(j - 1)] - Uy[(i - 2) + nx*(j - 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1));*/ //case 2 //ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; //ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; //ftemp[i + nx*j + nx*ny * 6] = -ftemp[i + nx*j + nx*ny * 8]; //Ux1 = Ux[(i - 1) + nx*(j - 1)] + 0.5*(Ux[(i - 1) + nx*(j - 1)] - Ux[(i - 2) + nx*(j - 2)]); //ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); //ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); //ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5];*/ //case 4 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; Ux1 = Ux[(i - 1) + 
nx*(j - 1)] + 0.5*(Ux[(i - 1) + nx*(j - 1)] - Ux[(i - 2) + nx*(j - 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); */ //case 5 // Ux1 = Ux[(i - 1) + nx*(j - 0)]; //// rho_extra = rho[(i - 1) + nx*(j - 1)] + 0.5*(rho[(i - 1) + nx*(j - 1)] - rho[(i - 2) + nx*(j - 2)]); // rho_extra = rho[(i - 1) + nx*(j - 0)]; // ru = rho_extra * Ux1; // // ftemp[i + nx*j + nx*ny * 8] = (1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 6] = -(1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0) * ru; // ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0) * ru; // // ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //Periodic + Bounce back /*ftemp[i + nx*0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i - 1) + nx*0 + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; Ux1 = Ux[(i - 1) + nx*(j - 1)]; Uy1 = Uy[(i - 1) + nx*(j - 1)]; ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); */ //Periodic + Extrapolation /*ftemp[i + nx * 0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i - 1) + nx * 0 + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; */ //wet-node method //// rho0 = rho[(i - 1) + nx*(j - 0)]; // rho0 = rho1; //// rho0 = rho[(i - 0) + nx*(j - 1)] + 0.5*(rho[(i - 0) + nx*(j - 1)] - rho[(i - 0) + nx*(j - 2)]); // // ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; // ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; // ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; //Equilibrium float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 
2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // // ============================================================================ // // BOTTOM-RIGHT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == nx - 1) && (j == 0)) { //case 1 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; Ux1 = Ux[(i - 1) + nx*(j + 1)] + 0.5*(Ux[(i - 1) + nx*(j + 1)] - Ux[(i - 2) + nx*(j + 2)]); Uy1 = Uy[(i - 1) + nx*(j + 1)] + 0.5*(Uy[(i - 1) + nx*(j + 1)] - Uy[(i - 2) + nx*(j + 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); */ //case 2 //ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; //ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; //ftemp[i + nx*j + nx*ny * 7] = -ftemp[i + nx*j + nx*ny * 5]; //Ux1 = Ux[(i - 1) + nx*(j + 1)] + 0.5*(Ux[(i - 1) + nx*(j + 1)] - Ux[(i - 2) + nx*(j + 2)]); //ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); //ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); // //ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5];*/ //case 4 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; Ux1 = Ux[(i - 1) + nx*(j + 1)] + 0.5*(Ux[(i - 1) + nx*(j + 1)] - Ux[(i - 2) + nx*(j + 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); ftemp[i + nx*j + nx*ny 
* 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); */ //case 5 // Ux1 = Ux[(i - 1) + nx*(j + 0)]; //// rho_extra = rho[(i - 1) + nx*(j + 1)] + 0.5*(rho[(i - 1) + nx*(j + 1)] - rho[(i - 2) + nx*(j + 2)]); // rho_extra = rho[(i - 1) + nx*(j + 0)]; // // ru = Ux1 * rho_extra; // // ftemp[i + nx*j + nx*ny * 5] = (1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 7] = -(1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0) * ru; // ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0) * ru; // // ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); // //Periodic + Bounce back /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i - 1) + nx*(ny - 1) + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; Ux1 = Ux[(i - 1) + nx*(j + 1)]; Uy1 = Uy[(i - 1) + nx*(j + 1)]; ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1));*/ //Periodic + Extrapolation /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i - 1) + nx*(ny - 1) + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; */ //wet-node method //// rho0 = rho[(i - 1) + nx*(j + 0)]; // rho0 = rho1; //// rho0 = rho[(i - 0) + nx*(j + 1)] + 0.5*(rho[(i - 0) + nx*(j + 1)] - rho[(i - 0) + nx*(j + 2)]); // ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; // ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; // ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); // ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; //Equilibrium float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + 
			(4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
		ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
		ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
		ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)));
	}
	// ============================================================================ //
}

void LBM_GPU::BC_bounceback() {
	dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
	dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z);
	Kernel_BC_bounceback << < dimGrid, dimBlock >> > (d_f, d_ftemp, d_rho, d_Ux, d_Uy, d_Ux0, rho1, nx, ny, a);
}

__global__ void Kernel_BC_extra(float* ftemp, float* Ux, float* rho, int nx, int ny, int a, float rho1) {
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	int j = blockDim.y * blockIdx.y + threadIdx.y;
	int k = blockDim.z * blockIdx.z + threadIdx.z;
	if (i >= nx || j >= ny || k >= a) return;

	float ru, Ux_extra, rho_extra;

	// ============================================================================ //
	// TOP-LEFT CORNER (VELOCITY & PERIODIC)
	// ============================================================================ //
	if ((i == 0) && (j == ny - 1)) {
		//Extrapolation first order
		/*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 0];
		ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 1];
		ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 2];
		ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 3];
		ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 4];
		ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 5];
		ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 6];
		ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 7];
		ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 8];*/

		//Extrapolation high order
		/*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 0] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 0] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 0]);
		ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 1]);
		ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 2] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 2] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 2]);
		ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 3] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 3]);
		ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 4] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 4]);
		ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 5]);
		ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 1) +
nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 6] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 7] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 8] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 8]); */ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 1] - ftemp[i + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 5] - ftemp[i + nx*(j - 2) + nx*ny * 5]); rho_extra = rho[i + nx*(j - 1)] + 0.5 * (rho[i + nx*(j - 1)] - rho[i + nx*(j - 2)]); Ux_extra = Ux[i + nx*(j - 1)] + 0.5 * (Ux[i + nx*(j - 1)] - Ux[i + nx*(j - 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 1] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 4] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 8]; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; } // ============================================================================ // // ============================================================================ // // BOTTOM-LEFT CORNER (VELOCITY & PERIODIC) // ============================================================================ // if ((i == 0) && (j == 0)) { //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 8];*/ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 0] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 0] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 1) + nx*(j + 1) 
+ nx*ny * 1] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 2] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 3] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 4] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 4] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 5] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 6] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 7] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 8]); */ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 1] = ftemp[i+ nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[i+ nx*(j + 1) + nx*ny * 1] - ftemp[i+ nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i+ nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[i+ nx*(j + 1) + nx*ny * 8] - ftemp[i+ nx*(j + 2) + nx*ny * 8]); rho_extra = rho[i + nx*(j + 1)] + 0.5 * (rho[i + nx*(j + 1)] - rho[i + nx*(j + 2)]); Ux_extra = Ux[i + nx*(j + 1)] + 0.5 * (Ux[i + nx*(j + 1)] - Ux[i + nx*(j + 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru; */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 1] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 2] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 6]; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; } // ============================================================================ // // ============================================================================ // // TOP-RIGHT CORNER (EXTRAPOLATION & PERIODIC) // ============================================================================ // if ((i == nx - 1) && (j == ny - 1)) { //Extrapolation /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 0] + 0.5 * (ftemp[(i - 
1) + nx*(j - 1) + nx*ny * 0] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 1] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 2] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 2] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 4] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 5] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 7] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 8] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 8]);*/ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 3] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 3] + ftemp[(i - 3) + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 6] + ftemp[(i - 3) + nx*j + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 7] + ftemp[(i - 3) + nx*j + nx*ny * 7]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 4] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 8]; */ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7]; */ //Extrapolation second order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7];*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 5]; ftemp[i + 
nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 8];*/ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 3] - ftemp[i + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 6] - ftemp[i + nx*(j - 2) + nx*ny * 6]); rho_extra = rho[i + nx*(j - 1)] + 0.5 * (rho[i + nx*(j - 1)] - rho[i + nx*(j - 2)]); Ux_extra = Ux[i + nx*(j - 1)] + 0.5 * (Ux[i + nx*(j - 1)] - Ux[i + nx*(j - 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; } // ============================================================================ // // ============================================================================ // // BOTTOM-RIGHT CORNER (EXTRAPOLATION & PERIODIC) // ============================================================================ // if ((i == nx - 1) && (j == 0)) { //Extrapolation /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 0] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 0] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 1] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 2] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 4] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 4] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 5] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 6] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 8] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 8]); */ //Extrapolation high order /* ftemp[i + nx*j + nx*ny * 3] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 3] + ftemp[(i - 3) + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - 
5.0*ftemp[(i - 2) + nx*j + nx*ny * 6] + ftemp[(i - 3) + nx*j + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 7] + ftemp[(i - 3) + nx*j + nx*ny * 7]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 2] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 5];*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7];*/ //Extrapolation second order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7]; */ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 8]; */ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 3] - ftemp[i + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 7] - ftemp[i + nx*(j + 2) + nx*ny * 7]); rho_extra = rho[i + nx*(j + 1)] + 0.5 * (rho[i + nx*(j + 1)] - rho[i + nx*(j + 2)]); Ux_extra = Ux[i + nx*(j + 1)] + 0.5 * (Ux[i + nx*(j + 1)] - Ux[i + nx*(j + 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; } // ============================================================================ // } void LBM_GPU::BC_extra() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X 
- 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_BC_extra << < dimGrid, dimBlock >> > (d_ftemp, d_Ux, d_rho, nx, ny, a, rho1); } __global__ void Kernel_Eq(float* ftemp, float* feq, float* Ux, float* Uy, float* rho, float* ex, float* ey, int nx, int ny, int a, int* is_solid_node, float c) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; //Calculation of Macroscopic var if (!is_solid_node[i + nx*j]){ rho[i + nx*j] = ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]; Ux[i + nx*j] = ftemp[i + nx*j + nx*ny * 1] * ex[1] + ftemp[i + nx*j + nx*ny * 3] * ex[3] + ftemp[i + nx*j + nx*ny * 5] * ex[5] + ftemp[i + nx*j + nx*ny * 6] * ex[6] + ftemp[i + nx*j + nx*ny * 7] * ex[7] + ftemp[i + nx*j + nx*ny * 8] * ex[8]; Uy[i + nx*j] = ftemp[i + nx*j + nx*ny * 2] * ey[2] + ftemp[i + nx*j + nx*ny * 4] * ey[4] + ftemp[i + nx*j + nx*ny * 5] * ey[5] + ftemp[i + nx*j + nx*ny * 6] * ey[6] + ftemp[i + nx*j + nx*ny * 7] * ey[7] + ftemp[i + nx*j + nx*ny * 8] * ey[8]; Ux[i + nx*j] /= rho[i + nx*j]; Uy[i + nx*j] /= rho[i + nx*j]; feq[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } } __global__ void Kernel_Collision(float* fN, float* ftemp, float* feq, int nx, int ny, int a, float tau, int* is_solid_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * 
blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; if (!is_solid_node[i + nx*j]) { fN[i + nx*j + nx*ny*k] = ftemp[i + nx*j + nx*ny*k] - (ftemp[i + nx*j + nx*ny*k] - feq[i + nx*j + nx*ny*k]) / tau; } } void LBM_GPU::Collision() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Eq << < dimGrid, dimBlock >> > (d_ftemp, d_feq, d_Ux, d_Uy, d_rho, d_ex, d_ey, nx, ny, a, d_is_solid_node, c); Kernel_Collision << < dimGrid, dimBlock >> > (d_fN, d_ftemp, d_feq, nx, ny, a, tau, d_is_solid_node); } __global__ void Kernel_Error(float* f, float* Ux, float* Uy, float* U, float* rho, float* fN, float* UxN, float* UyN, float* UN, float* rhoN, float* ex, float* ey, int nx, int ny, int a, int* is_solid_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; if (!is_solid_node[i + nx*j]) { rho[i + nx*j] = f[i + nx*j + nx*ny * 0] + f[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 8]; Ux[i + nx*j] = f[i + nx*j + nx*ny * 1] * ex[1] + f[i + nx*j + nx*ny * 3] * ex[3] + f[i + nx*j + nx*ny * 5] * ex[5] + f[i + nx*j + nx*ny * 6] * ex[6] + f[i + nx*j + nx*ny * 7] * ex[7] + f[i + nx*j + nx*ny * 8] * ex[8]; Uy[i + nx*j] = f[i + nx*j + nx*ny * 2] * ey[2] + f[i + nx*j + nx*ny * 4] * ey[4] + f[i + nx*j + nx*ny * 5] * ey[5] + f[i + nx*j + nx*ny * 6] * ey[6] + f[i + nx*j + nx*ny * 7] * ey[7] + f[i + nx*j + nx*ny * 8] * ey[8]; Ux[i + nx*j] /= rho[i + nx*j]; Uy[i + nx*j] /= rho[i + nx*j]; U[i + nx*j] = sqrt(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)); rhoN[i + nx*j] = fN[i + nx*j + nx*ny * 0] + fN[i + nx*j + nx*ny * 1] + fN[i + nx*j + nx*ny * 2] + fN[i + nx*j + nx*ny * 3] + fN[i + nx*j + nx*ny * 4] + fN[i + nx*j + nx*ny * 5] + fN[i + nx*j + nx*ny * 6] + fN[i + nx*j + nx*ny * 7] + fN[i + nx*j + nx*ny * 8]; UxN[i + nx*j] = fN[i + nx*j + nx*ny * 1] * ex[1] + fN[i + nx*j + nx*ny * 3] * ex[3] + fN[i + nx*j + nx*ny * 5] * ex[5] + fN[i + nx*j + nx*ny * 6] * ex[6] + fN[i + nx*j + nx*ny * 7] * ex[7] + fN[i + nx*j + nx*ny * 8] * ex[8]; UyN[i + nx*j] = fN[i + nx*j + nx*ny * 2] * ey[2] + fN[i + nx*j + nx*ny * 4] * ey[4] + fN[i + nx*j + nx*ny * 5] * ey[5] + fN[i + nx*j + nx*ny * 6] * ey[6] + fN[i + nx*j + nx*ny * 7] * ey[7] + fN[i + nx*j + nx*ny * 8] * ey[8]; UxN[i + nx*j] /= rhoN[i + nx*j]; UyN[i + nx*j] /= rhoN[i + nx*j]; UN[i + nx*j] = sqrt(pow(UxN[i + nx*j], 2) + pow(UyN[i + nx*j], 2)); } } void LBM_GPU::Error() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Error << < dimGrid, dimBlock >> > (d_f, d_Ux, d_Uy, d_U, d_rho, d_fN, d_UxN, d_UyN, d_UN, d_rhoN, d_ex, d_ey, nx, ny, a, d_is_solid_node); hipMemcpy(U, d_U, nx*ny * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(UN, d_UN, nx*ny * sizeof(float), hipMemcpyDeviceToHost); sum = 0.0; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { if (!is_solid_node[i + nx*j]) { sum = sum + pow(abs(UN[i + nx*j] - U[i + nx*j]), 2); } } } error = sqrt(sum / (nx*ny - sn)); } __global__ void Kernel_Update(float* fN, float* f, float* Ux, float* Uy, float* U, 
float* rho, float* ex, float* ey, int nx, int ny, int a, int* is_solid_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; if(!is_solid_node[i + nx*j]) f[i + nx*j + nx*ny*k] = fN[i + nx*j + nx*ny*k]; rho[i + nx*j] = f[i + nx*j + nx*ny * 0] + f[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 8]; Ux[i + nx*j] = f[i + nx*j + nx*ny * 1] * ex[1] + f[i + nx*j + nx*ny * 3] * ex[3] + f[i + nx*j + nx*ny * 5] * ex[5] + f[i + nx*j + nx*ny * 6] * ex[6] + f[i + nx*j + nx*ny * 7] * ex[7] + f[i + nx*j + nx*ny * 8] * ex[8]; Uy[i + nx*j] = f[i + nx*j + nx*ny * 2] * ey[2] + f[i + nx*j + nx*ny * 4] * ey[4] + f[i + nx*j + nx*ny * 5] * ey[5] + f[i + nx*j + nx*ny * 6] * ey[6] + f[i + nx*j + nx*ny * 7] * ey[7] + f[i + nx*j + nx*ny * 8] * ey[8]; Ux[i + nx*j] /= rho[i + nx*j]; Uy[i + nx*j] /= rho[i + nx*j]; U[i + nx*j] = sqrt(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)); } void LBM_GPU::Update() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Update << < dimGrid, dimBlock >> > (d_fN, d_f, d_Ux, d_Uy, d_U, d_rho, d_ex, d_ey, nx, ny, a, d_is_solid_node); } void LBM_GPU::Momentum() { hipMemcpy(f, d_f, nx*ny*a * sizeof(float), hipMemcpyDeviceToHost); sum_Fx1 = 0.0; sum_Fx3 = 0.0; sum_Fx5 = 0.0; sum_Fx6 = 0.0; sum_Fx7 = 0.0; sum_Fx8 = 0.0; sum_Fy2 = 0.0; sum_Fy4 = 0.0; sum_Fy5 = 0.0; sum_Fy6 = 0.0; sum_Fy7 = 0.0; sum_Fy8 = 0.0; sum_Fx = 0.0; sum_Fy = 0.0; Fx = 0.0; Fy = 0.0; Cd = 0.0; Cl = 0.0; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { sum_Fx1 = 0.0; sum_Fx3 = 0.0; sum_Fx5 = 0.0; sum_Fx6 = 0.0; sum_Fx7 = 0.0; sum_Fx8 = 0.0; sum_Fy2 = 0.0; sum_Fy4 = 0.0; sum_Fy5 = 0.0; sum_Fy6 = 0.0; sum_Fy7 = 0.0; sum_Fy8 = 0.0; sum_Fx = 0.0; sum_Fy = 0.0; in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; dist = sqrt(pow((float)i - ic, 2) + pow((float)j - jc, 2)); q = dist - r; if (!is_boundary_node[i + nx*j]) { if (!is_solid_node[i + nx*j]) { if (is_solid_near_node[i + nx*j]) { if (is_solid_node[ip + nx*j]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 3] = 2.0 * q * f[i + nx*j + nx*ny * 1] + (1.0 - 2.0*q)*f[(i - 1) + nx*j + nx*ny * 1]; else ftemp[i + nx*j + nx*ny * 3] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 1] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 3]; sum_Fx1 = ex[1] * (ftemp[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 1]); } if (is_solid_node[i + nx*jp]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 4] = 2.0 * q * f[i + nx*j + nx*ny * 2] + (1.0 - 2.0*q)*f[i + nx*(j - 1) + nx*ny * 2]; else ftemp[i + nx*j + nx*ny * 4] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 2] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 4]; sum_Fy2 = ey[2] * (ftemp[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 2]); } if (is_solid_node[in + nx*j]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 1] = 2.0 * q * f[i + nx*j + nx*ny * 3] + (1.0 - 2.0*q)*f[(i + 1) + nx*j + nx*ny * 3]; else ftemp[i + nx*j + nx*ny * 1] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 3] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 1]; sum_Fx3 = ex[3] * (ftemp[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 3]); } if (is_solid_node[i + nx*jn]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 2] = 2.0 * q * f[i + nx*j + nx*ny * 4] + (1.0 - 2.0*q)*f[i + nx*(j + 1) + nx*ny * 4]; 
					else ftemp[i + nx*j + nx*ny * 2] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 4] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 2];
					sum_Fy4 = ey[4] * (ftemp[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 4]);
				}
				if (is_solid_node[ip + nx*jp]) {
					if (q < 0.5) ftemp[i + nx*j + nx*ny * 7] = 2.0 * q * f[i + nx*j + nx*ny * 5] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j - 1) + nx*ny * 5];
					else ftemp[i + nx*j + nx*ny * 7] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 5] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 7];
					sum_Fx5 = ex[5] * (ftemp[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 5]);
					sum_Fy5 = ey[5] * (ftemp[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 5]);
				}
				if (is_solid_node[in + nx*jp]) {
					if (q < 0.5) ftemp[i + nx*j + nx*ny * 8] = 2.0 * q * f[i + nx*j + nx*ny * 6] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j - 1) + nx*ny * 6];
					else ftemp[i + nx*j + nx*ny * 8] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 6] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 8];
					sum_Fx6 = ex[6] * (ftemp[i + nx*j + nx*ny * 8] + f[i + nx*j + nx*ny * 6]);
					sum_Fy6 = ey[6] * (ftemp[i + nx*j + nx*ny * 8] + f[i + nx*j + nx*ny * 6]);
				}
				if (is_solid_node[in + nx*jn]) {
					if (q < 0.5) ftemp[i + nx*j + nx*ny * 5] = 2.0 * q * f[i + nx*j + nx*ny * 7] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j + 1) + nx*ny * 7];
					else ftemp[i + nx*j + nx*ny * 5] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 7] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 5];
					sum_Fx7 = ex[7] * (ftemp[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 7]);
					sum_Fy7 = ey[7] * (ftemp[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 7]);
				}
				if (is_solid_node[ip + nx*jn]) {
					if (q < 0.5) ftemp[i + nx*j + nx*ny * 6] = 2.0 * q * f[i + nx*j + nx*ny * 8] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j + 1) + nx*ny * 8];
					else ftemp[i + nx*j + nx*ny * 6] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 8] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 6];
					sum_Fx8 = ex[8] * (ftemp[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 8]);
					sum_Fy8 = ey[8] * (ftemp[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 8]);
				}
				//Momentum exchange: accumulate the x (drag) and y (lift) force contributions of this near-surface fluid node
				sum_Fx = sum_Fx1 + sum_Fx3 + sum_Fx5 + sum_Fx6 + sum_Fx7 + sum_Fx8;
				sum_Fy = sum_Fy2 + sum_Fy4 + sum_Fy5 + sum_Fy6 + sum_Fy7 + sum_Fy8;
				Fx = Fx + sum_Fx;
				Fy = Fy + sum_Fy;
				}
			}
		}
	}
	}
	Cd = 2.0*Fx / (rho1*pow((2.0 / 3.0)*Um, 2)*snx);
	Cl = 2.0*Fy / (rho1*pow((2.0 / 3.0)*Um, 2)*sny);
	fout_GPU_Cd << Cd << "\t" << Cl << endl;
}

void LBM_GPU::Print() {
	hipMemcpy(Ux, d_Ux, nx*ny * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(Uy, d_Uy, nx*ny * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(U, d_U, nx*ny * sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(rho, d_rho, nx*ny * sizeof(float), hipMemcpyDeviceToHost);

	// ============================================================================ //
	// CHANGE LBM -> PHYSICAL
	// ============================================================================ //
	for (j = 0; j < ny; j++) {
		for (i = 0; i < nx; i++) {
			Ux_p[i + nx*j] = Ux[i + nx*j];
			Uy_p[i + nx*j] = Uy[i + nx*j];
			U_p[i + nx*j] = U[i + nx*j];
			P[i + nx*j] = rho[i + nx*j] / (3.0);
		}
	}
	// ============================================================================ //

	fout_GPU << endl;
	fout_GPU << "variables = X Y Ux Uy U rho P" << endl;
	fout_GPU << "zone i=" << nx << " j=" << ny << endl;
	for (j = 0; j < ny; j++) {
		for (i = 0; i < nx; i++) {
			fout_GPU << i << "\t" << j << "\t" << Ux[i + nx*j] << "\t" << Uy[i + nx*j] << "\t" << U[i + nx*j] << "\t" << rho[i + nx*j] << "\t" << P[i + nx*j] << endl;
		}
	}
	fout_GPU << endl;

	i = 0;
	fout_GPU_Ux << "variables = X Y Ux" << endl;
	fout_GPU_Ux << "zone i=" << nx << " j=" << ny << endl;
	for (j = 0; j < ny; j++) {
		fout_GPU_Ux << i << "\t" << j << "\t" << Ux[i + nx*j] << endl;
	}
	fout_GPU_Ux << endl;
}

LBM_GPU::~LBM_GPU() {
	//Release device memory
	hipFree(d_Ux0);
	hipFree(d_is_boundary_node);
	hipFree(d_is_solid_node);
	hipFree(d_f);
	hipFree(d_fN);
	hipFree(d_ftemp);
	hipFree(d_feq);
	hipFree(d_Ux);
	hipFree(d_Uy);
	hipFree(d_rho);
	hipFree(d_ex);
	hipFree(d_ey);
	hipFree(d_U);
	hipFree(d_UN);
	hipFree(d_UxN);
	hipFree(d_UyN);
	hipFree(d_rhoN);	//device buffer; the host array rhoN is released with delete[] below
	//Release host memory
	delete[] Ux0;
	delete[] Ux0_p;
	delete[] P;
	delete[] Uy_p;
	delete[] Ux_p;
	delete[] U_p;
	delete[] ey;
	delete[] ex;
	delete[] fN;
	delete[] feq;
	delete[] ftemp;
	delete[] f;
	delete[] rhoN;
	delete[] UyN;
	delete[] UxN;
	delete[] UN;
	delete[] rho;
	delete[] Uy;
	delete[] Ux;
	delete[] U;
	delete[] is_boundary_node;
	delete[] is_solid_node;
	delete[] is_solid_near_node;
	cout << endl << "Done!" << endl;
}
b22bd964266fdd23119d5aeba6cc7f654df2b7bf.cu
#include "LBM_GPU.cuh" ifstream fin_GPU("in_GPU.txt"); ofstream fout_GPU("out_GPU.dat"); ofstream fout_GPU_Cd("out_GPU_Cd.dat"); ofstream fout_GPU_Ux0("out_GPU_Ux0.dat"); ofstream fout_GPU_Ux("out_GPU_Ux.dat"); LBM_GPU::LBM_GPU() { // ============================================================================ // // LOAD THE PARAMETERS // ============================================================================ // fin_GPU >> nx; fin_GPU >> comment; fin_GPU >> ny; fin_GPU >> comment; fin_GPU >> Lx; fin_GPU >> comment; fin_GPU >> Ly; fin_GPU >> comment; fin_GPU >> a; fin_GPU >> comment; fin_GPU >> rho1; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_X; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_Y; fin_GPU >> comment; fin_GPU >> BLOCK_SIZE_Z; fin_GPU >> comment; fin_GPU >> D; fin_GPU >> comment; fin_GPU >> Um_p; fin_GPU >> comment; fin_GPU >> tau; fin_GPU >> comment; fin_GPU >> nu_p; fin_GPU >> comment; // ============================================================================ // // ============================================================================ // // NEW & CUDAMALLOC // ============================================================================ // is_boundary_node = new int[nx*ny]; cudaMalloc((void**)&d_is_boundary_node, nx*ny * sizeof(int)); is_solid_node = new int[nx*ny]; cudaMalloc((void**)&d_is_solid_node, nx*ny * sizeof(int)); is_solid_near_node = new int[nx*ny]; U = new float[nx*ny]; cudaMalloc((void**)&d_U, nx*ny * sizeof(float)); Ux = new float[nx*ny]; cudaMalloc((void**)&d_Ux, nx*ny * sizeof(float)); Uy = new float[nx*ny]; cudaMalloc((void**)&d_Uy, nx*ny * sizeof(float)); rho = new float[nx*ny]; cudaMalloc((void**)&d_rho, nx*ny * sizeof(float)); UN = new float[nx*ny]; cudaMalloc((void**)&d_UN, nx*ny * sizeof(float)); UxN = new float[nx*ny]; cudaMalloc((void**)&d_UxN, nx*ny * sizeof(float)); UyN = new float[nx*ny]; cudaMalloc((void**)&d_UyN, nx*ny * sizeof(float)); rhoN = new float[nx*ny]; cudaMalloc((void**)&d_rhoN, nx*ny * sizeof(float)); f = new float[nx*ny*a]; cudaMalloc((void**)&d_f, nx*ny*a * sizeof(float)); ftemp = new float[nx*ny*a]; cudaMalloc((void**)&d_ftemp, nx*ny*a * sizeof(float)); fN = new float[nx*ny*a]; cudaMalloc((void**)&d_fN, nx*ny*a * sizeof(float)); feq = new float[nx*ny*a]; cudaMalloc((void**)&d_feq, nx*ny*a * sizeof(float)); ex = new float[a]; cudaMalloc((void**)&d_ex, a * sizeof(float)); ey = new float[a]; cudaMalloc((void**)&d_ey, a * sizeof(float)); U_p = new float[nx*ny]; Ux_p = new float[nx*ny]; Uy_p = new float[nx*ny]; P = new float[nx*ny]; Ux0_p = new float[ny]; Ux0 = new float[ny]; cudaMalloc((void**)&d_Ux0, ny * sizeof(float)); // ============================================================================ // // ============================================================================ // // MICROSCOPIC VELOCITY // ============================================================================ // ex[0] = 0.0, ey[0] = 0.0; ex[1] = 1.0, ey[1] = 0.0; ex[2] = 0.0, ey[2] = 1.0; ex[3] = -1.0, ey[3] = 0.0; ex[4] = 0.0, ey[4] = -1.0; ex[5] = 1.0, ey[5] = 1.0; ex[6] = -1.0, ey[6] = 1.0; ex[7] = -1.0, ey[7] = -1.0; ex[8] = 1.0, ey[8] = -1.0; cudaMemcpy(d_ex, ex, a * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_ey, ey, a * sizeof(float), cudaMemcpyHostToDevice); // ============================================================================ // // ============================================================================ // // SET BOUNDARY NODE // ============================================================================ // sIm = nx / 
Lx * 0.15; sIM = nx / Lx * (0.15 + D) - 1; sJm = ny / Ly * 0.15; sJM = ny / Ly * (0.15 + D) - 1; snx = (sIM - sIm) + 1; sny = (sJM - sJm) + 1; sn = 0; ic = (float)sIm + ((float)sIM - (float)sIm) / 2; jc = (float)sJm + ((float)sJM - (float)sJm) / 2; r = ((float)sIM - (float)sIm) / 2; cout << "sIm = " << sIm << endl; cout << "sIM = " << sIM << endl; cout << "sJm = " << sJm << endl; cout << "sJM = " << sJM << endl; cout << "snx = " << snx << endl; cout << "sny = " << sny << endl; cout << "ic = " << ic << endl; cout << "jc = " << jc << endl; cout << "r = " << r << endl; //set boundary node for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1) is_boundary_node[i + nx*j] = 1; else is_boundary_node[i + nx*j] = 0; } } //Binary data /*for (i = 0; i < nx; i++) { for (j = ny - 1; j > -1; j--) { if ((i >= sIm && i <= sIM) && (j >= sJm && j <= sJM)) fin_grid_GPU >> is_solid_node[i + nx*j]; else is_solid_node[i + nx*j] = 0; if (is_solid_node[i + nx*j]) sn = sn + 1; } }*/ //set solid node for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { dist = sqrt(pow((float)i - ic, 2) + pow((float)j - jc, 2)); if (dist <= r) is_solid_node[i + nx*j] = 1; else is_solid_node[i + nx*j] = 0; if (is_solid_node[i + nx*j]) sn = sn + 1; } } //set near solid node for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { is_solid_near_node[i + nx*j] = 0; in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; if (!is_boundary_node[i + nx*j]) { if (!is_solid_node[i + nx*j]) { if (is_solid_node[ip + nx*j]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[i + nx*jp]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[in + nx*j]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[i + nx*jn]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[ip + nx*jp]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[in + nx*jp]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[in + nx*jn]) { is_solid_near_node[i + nx*j] = 1; } else if (is_solid_node[ip + nx*jn]) { is_solid_near_node[i + nx*j] = 1; } } } } } cudaMemcpy(d_is_boundary_node, is_boundary_node, nx*ny * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_is_solid_node, is_solid_node, nx*ny * sizeof(int), cudaMemcpyHostToDevice); // ============================================================================ // // ============================================================================ // // SET PARAMETERS & INITIAL CONDITION // ============================================================================ // del_x = 1.0; del_y = 1.0; del_t = 1.0; c = del_y / del_t; c_s = (1.0 / sqrt(3.0))*c; del_x_p = D / (float)snx; del_y_p = D / (float)sny; // del_t_p = pow(del_y_p, 2); // del_t_p = 0.000013; //Uniform /*nu_p = 0.06 * (del_y_p / del_t_p) * D / Re; nu = (del_t_p / pow(del_y_p, 2))*nu_p; tau = (1.0 / pow(c_s, 2))*nu + (0.5*del_t);*/ //Input Reynolds number and del_t //Um = Um_p * (del_t_p / del_y_p); //nu_p = (2.0 / 3.0) * Um_p * D / Re; //nu = (del_t_p / pow(del_y_p, 2))*nu_p; //tau = (1.0 / pow(c_s, 2))*nu + (0.5*del_t); //Input tau and kinematic viscosity del_t_p = pow(c_s, 2)*(tau - 0.5)*pow(del_y_p, 2) / nu_p; Re = (2.0 / 3.0) * Um_p * D / nu_p; Um = Um_p * del_t_p / del_y_p; nu = nu_p * del_t_p / pow(del_y_p, 2); cout << endl; cout << "// =================== Stability condition ================ //" << endl; cout << "Check 1. [tau > 0.5]" << endl; cout << "tau = " << tau << endl; cout << "Check 2. 
Mach number condition [Ma = Uavg/c_s << 1]" << endl; cout << "Ma = " << (2.0/3.0)*Um/c_s << endl; cout << "Check 3. BGK Stability. [If tau < 0.55, tau > 0.5 + 0.125*Uavg]" << endl; cout << "tau = " << tau << " > " << 0.5 + 0.125*(2.0 / 3.0)*Um << endl; cout << "// ======================================================== //" << endl; //intitalize variables for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { Ux[i + nx*j] = 0.0; Uy[i + nx*j] = 0.0; U[i + nx*j] = 0.0; UxN[i + nx*j] = 0.0; UyN[i + nx*j] = 0.0; UN[i + nx*j] = 0.0; P[i + nx*j] = 0.0; for (k = 0; k < a; k++) { ftemp[i + nx*j + nx*ny*k] = 0.0; feq[i + nx*j + nx*ny*k] = 0.0; fN[i + nx*j + nx*ny*k] = 0.0; } if (!is_solid_node[i + nx*j]) rho[i + nx*j] = 1.0; else rho[i + nx*j] = 1.0; f[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j]; f[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j]; } } cudaMemcpy(d_rho, rho, nx*ny * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_f, f, nx*ny*a * sizeof(float), cudaMemcpyHostToDevice); //set velocity profile at inlet for (j = 0; j < ny; j++) { Ux0_p[j] = 4.0*Um_p / (pow(Ly, 2))*(((float)j + 1) - 0.5)*del_y_p*(Ly - (((float)j + 1) - 0.5)*del_y_p); // Ux0[j] = 4.0*Um / (pow(ny, 2))*(((float)j + 1) - 0.5)*del_y*(ny - (((float)j + 1) - 0.5)*del_y); Ux0[j] = Ux0_p[j] * (del_t_p / del_y_p); fout_GPU_Ux0 << Ux0[j] << endl; } cudaMemcpy(d_Ux0, Ux0, ny * sizeof(float), cudaMemcpyHostToDevice); // ============================================================================ // } __global__ void Kernel_Streaming(float* f, float* ftemp, int* is_boundary_node, int* is_solid_node, int nx, int ny, int a, float ic, float jc, float r) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; int in, ip, jn, jp; in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; float dist = sqrt(pow((float)i - ic, 2) + pow((float)j - jc, 2)); float q = dist - r; if (!is_boundary_node[i + nx*j]) { if (!is_solid_node[i + nx*j]) { ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; if (!is_solid_node[ip + nx*j]) ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 3] = 2.0 * q * f[i + nx*j + nx*ny * 1] + (1.0 - 2.0*q)*f[(i - 1) + nx*j + nx*ny * 1]; else ftemp[i + nx*j + nx*ny * 3] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 1] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 3]; } if (!is_solid_node[i + nx*jp]) ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 4] = 2.0 * q * f[i + nx*j + nx*ny * 2] + (1.0 - 2.0*q)*f[i + nx*(j - 1) + nx*ny * 2]; else ftemp[i + nx*j + nx*ny * 4] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 2] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 4]; } if (!is_solid_node[in + nx*j]) ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 1] = 2.0 * q * f[i + nx*j + nx*ny * 3] + (1.0 - 2.0*q)*f[(i + 1) + nx*j + nx*ny * 3]; else ftemp[i + nx*j + nx*ny * 1] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 3] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 1]; } if 
(!is_solid_node[i + nx*jn]) ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 2] = 2.0 * q * f[i + nx*j + nx*ny * 4] + (1.0 - 2.0*q)*f[i + nx*(j + 1) + nx*ny * 4]; else ftemp[i + nx*j + nx*ny * 2] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 4] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 2]; } if (!is_solid_node[ip + nx*jp]) ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 7] = 2.0 * q * f[i + nx*j + nx*ny * 5] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j - 1) + nx*ny * 5]; else ftemp[i + nx*j + nx*ny * 7] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 5] + (2.0*q - 1) / (2.0*q)*f[i + nx*j + nx*ny * 7]; } if (!is_solid_node[in + nx*jp]) ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 8] = 2.0 * q * f[i + nx*j + nx*ny * 6] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j - 1) + nx*ny * 6]; else ftemp[i + nx*j + nx*ny * 8] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 6] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 8]; } if (!is_solid_node[in + nx*jn]) ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 5] = 2.0 * q * f[i + nx*j + nx*ny * 7] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j + 1) + nx*ny * 7]; else ftemp[i + nx*j + nx*ny * 5] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 7] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 5]; } if (!is_solid_node[ip + nx*jn]) ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; else { if (q < 0.5) ftemp[i + nx*j + nx*ny * 6] = 2.0 * q * f[i + nx*j + nx*ny * 8] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j + 1) + nx*ny * 8]; else ftemp[i + nx*j + nx*ny * 6] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 8] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 6]; } } } else { if ((i == 0) && (j > 0 && j < ny - 1)) { //INLET ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i > 0 && i < nx - 1) && (j == ny - 1)) { //TOP ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i > 0 && i < nx - 1) && (j == 0)) { //BOTTOM ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; } else if ((i == nx - 1) && (j > 0 && j < ny - 1)) { //OUTLET ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; } else if ((i == 0) && (j == 0)) { //BOTTOM-LEFT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 
2]; ftemp[ip + nx*jp + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; } else if ((i == 0) && (j == ny - 1)) { //TOP-LEFT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[ip + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[ip + nx*jn + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; } else if ((i == nx - 1) && (j == ny - 1)) { //TOP-RIGHT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*jn + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[in + nx*jn + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; } else if ((i == nx - 1) && (j == 0)) { //BOTTOM-RIGHT ftemp[i + nx*j + nx*ny * 0] = f[i + nx*j + nx*ny * 0]; ftemp[i + nx*jp + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[in + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 3]; ftemp[in + nx*jp + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; } } } void LBM_GPU::Streaming() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Streaming << < dimGrid, dimBlock >> > (d_f, d_ftemp, d_is_boundary_node, d_is_solid_node, nx, ny, a, ic, jc, r); } __global__ void Kernel_BC_bounceback(float* f, float* ftemp, float* rho, float* Ux, float* Uy, float* Ux0, float rho1, int nx, int ny, int a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; float rho0, ru, Ux1, Uy1, rho_extra, Ux_extra, Uy_extra; // ============================================================================ // // TOP BOUNDARY (HALF-AWAY BOUNCEBACK) // ============================================================================ // if ((i > 0 && i < nx - 1) && (j == ny - 1)){ //Bounce-back boundary ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; //Periodic boundary /*ftemp[i + nx*0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i + 1) + nx*0 + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; ftemp[(i - 1) + nx*0 + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; */ //Velocity boundary(first order) /*rho_extra = rho[i + nx*(j - 1)] + 0.5 * (rho[i + nx*(j - 1)] - rho[i + nx*(j - 2)]); Ux_extra = Ux[i + nx*(j - 1)]; ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j - 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j - 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j - 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j - 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j - 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j - 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j - 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j - 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j - 1) + nx*ny * 8]; */ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j - 1) + nx*ny * 0] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 0] - ftemp[i + nx*(j - 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 1] 
- ftemp[i + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j - 1) + nx*ny * 2] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 2] - ftemp[i + nx*(j - 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 3] - ftemp[i + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j - 1) + nx*ny * 4] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 4] - ftemp[i + nx*(j - 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 5] - ftemp[i + nx*(j - 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 6] - ftemp[i + nx*(j - 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j - 1) + nx*ny * 7] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 7] - ftemp[i + nx*(j - 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j - 1) + nx*ny * 8] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 8] - ftemp[i + nx*(j - 2) + nx*ny * 8]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 4] = 2.0 * ftemp[i + nx*(j - 1) + nx*ny * 4] - ftemp[i + nx*(j - 2) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 7] = 2.0 * ftemp[i + nx*(j - 1) + nx*ny * 7] - ftemp[i + nx*(j - 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = 2.0 * ftemp[i + nx*(j - 1) + nx*ny * 8] - ftemp[i + nx*(j - 2) + nx*ny * 8]; */ //Equilibrium /*float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); */ //NEBB method /*ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] + (1.0 / 2.0)*(ftemp[i + nx*j + 
nx*ny * 1] - ftemp[i + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 1] - ftemp[i + nx*j + nx*ny * 3]); */ } // ============================================================================ // // ============================================================================ // // BOTTOM BOUNDARY (HALF-AWAY BOUNCEBACK) // ============================================================================ // if ((i > 0 && i < nx - 1) && (j == 0)){ //Bounce-back boundary ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; //Periodic boundary /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i - 1) + nx*(ny - 1) + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; ftemp[(i + 1) + nx*(ny - 1) + nx*ny * 8] = f[i + nx*j + nx*ny * 8];*/ //Velocity boundary(first order) /*rho_extra = rho[i + nx*(j + 1)] + 0.5 * (rho[i + nx*(j + 1)] - rho[i + nx*(j + 2)]); Ux_extra = Ux[i + nx*(j + 1)]; ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru;*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j + 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j + 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j + 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j + 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j + 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j + 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j + 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j + 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j + 1) + nx*ny * 8]; */ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[i + nx*(j + 1) + nx*ny * 0] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 0] - ftemp[i + nx*(j + 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 1] - ftemp[i + nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*(j + 1) + nx*ny * 2] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 2] - ftemp[i + nx*(j + 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 3] - ftemp[i + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*(j + 1) + nx*ny * 4] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 4] - ftemp[i + nx*(j + 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j + 1) + nx*ny * 5] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 5] - ftemp[i + nx*(j + 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j + 1) + nx*ny * 6] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 6] - ftemp[i + nx*(j + 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 7] - ftemp[i + nx*(j + 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 8] - ftemp[i + nx*(j + 2) + nx*ny * 8]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 2] = 2.0 * ftemp[i + nx*(j + 1) + nx*ny * 2] - ftemp[i + nx*(j + 2) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 5] = 2.0 * ftemp[i + nx*(j + 1) + nx*ny * 5] - ftemp[i + nx*(j + 2) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 2.0 * ftemp[i + 
nx*(j + 1) + nx*ny * 6] - ftemp[i + nx*(j + 2) + nx*ny * 6]; */ //Equilibrium /*float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); */ //NEBB method /*ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 3] - ftemp[i + nx*j + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 3] - ftemp[i + nx*j + nx*ny * 1]); */ } // ============================================================================ // // ============================================================================ // // LEFT BOUNDARY (VELOCITY) // ============================================================================ // if ((i == 0) && (j > 0 && j < ny - 1)) { /*rho0 = rho[(i + 1) + nx*j] + 0.5*(rho[(i + 1) + nx*j] - rho[(i + 2) + nx*j]); ru = rho0 * Ux0; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ /*rho0 = ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] + 2.0*(ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 6]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ //Zou - He boundary rho0 = (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + 
ftemp[i + nx*j + nx*ny * 4] + 2.0*(ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7])) / (1.0 - Ux0[j]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); //wet-node method // rho0 = (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] // + 2.0*(ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7])) / (1.0 - Ux0[j]); // ru = rho0 * Ux0[j]; //// ru = rho0 * 0.06; // // ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru - (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru + (1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); //Equilibrium /*float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4)) * pow(Ux0[j], 2) - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4))*pow(Ux0[j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); */ } // ============================================================================ // // ============================================================================ // // RIGHT BOUNDARY (EXTRAPOLATION) // ============================================================================ // if ((i == nx - 1) && (j > 0 && j < ny - 1)) { //Extrapolation // ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*j + nx*ny * 0] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 0] - ftemp[(i - 2) + nx*j + nx*ny * 
0]); // ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*j + nx*ny * 1] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 1] - ftemp[(i - 2) + nx*j + nx*ny * 1]); // ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*j + nx*ny * 2] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 2] - ftemp[(i - 2) + nx*j + nx*ny * 2]); // ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]); // ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*j + nx*ny * 4] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 4] - ftemp[(i - 2) + nx*j + nx*ny * 4]); // ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*j + nx*ny * 5] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 5] - ftemp[(i - 2) + nx*j + nx*ny * 5]); // ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]); // ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7]); // ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*j + nx*ny * 8] + 0.5 * (ftemp[(i - 1) + nx*j + nx*ny * 8] - ftemp[(i - 2) + nx*j + nx*ny * 8]); //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 0] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 0] + ftemp[(i - 3) + nx*j + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 1] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 1] + ftemp[(i - 3) + nx*j + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 2] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 2] + ftemp[(i - 3) + nx*j + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 3] + ftemp[(i - 3) + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 4] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 4] + ftemp[(i - 3) + nx*j + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 5] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 5] + ftemp[(i - 3) + nx*j + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 6] + ftemp[(i - 3) + nx*j + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 7] + ftemp[(i - 3) + nx*j + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 8] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 8] + ftemp[(i - 3) + nx*j + nx*ny * 8]); */ //Extrapolation type2 /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7];*/ //Extrapolation first order ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*j + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 
7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*j + nx*ny * 8]; //Extrapolation second order /*ftemp[i + nx*j + nx*ny * 0] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 0] - ftemp[(i - 2) + nx*j + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 1] - ftemp[(i - 2) + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 2] - ftemp[(i - 2) + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 4] - ftemp[(i - 2) + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 5] - ftemp[(i - 2) + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 8] - ftemp[(i - 2) + nx*j + nx*ny * 8];*/ //Velocity boundary (first order) /*rho_extra = rho[(i - 1) + nx*j] + 0.5*(rho[(i - 1) + nx*j] - rho[(i - 2) + nx*j]); Ux_extra = Ux[(i - 1) + nx*j]; Uy_extra = Uy[(i - 1) + nx*j]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0)*rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*rho_extra*(Ux_extra - Uy_extra); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*rho_extra*(Ux_extra + Uy_extra);*/ //Pressure boundary /*Ux1 = Ux[(i - 1) + nx*j] + 0.5*(Ux[(i - 1) + nx*j] - Ux[(i - 2) + nx*j]); Uy1 = Uy[(i - 1) + nx*j] + 0.5*(Uy[(i - 1) + nx*j] - Uy[(i - 2) + nx*j]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); */ //wet-node method /*Ux1 = -1.0 + (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 4] + 2.0*(ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 8])) / rho1; ru = rho1 * Ux1; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru -(1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru +(1.0 / 2.0)*(ftemp[i + nx*j + nx*ny * 2] - ftemp[i + nx*j + nx*ny * 4]); */ } // ============================================================================ // // ============================================================================ // // TOP-LEFT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == 0) && (j == ny - 1)) { //case 1 // ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; //// rho0 = rho[(i + 1) + nx*(j - 1)] + 0.5*(rho[(i + 1) + nx*(j - 1)] - rho[(i + 2) + nx*(j - 2)]); // rho0 = rho[i + nx*(j - 1)]; // ru = rho0 * Ux0[j]; // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // ftemp[i + nx*j + nx*ny * 8] = 
f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; //case 2 //ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; //ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; //ftemp[i + nx*j + nx*ny * 5] = -ftemp[i + nx*j + nx*ny * 7]; //rho0 = rho[(i + 1) + nx*(j - 1)] + 0.5*(rho[(i + 1) + nx*(j - 1)] - rho[(i + 2) + nx*(j - 2)]); //ru = rho0 * Ux0; //ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; //ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; //ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6];*/ //case 4 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; rho0 = rho[(i + 0) + nx*(j - 1)] + 0.5*(rho[(i + 0) + nx*(j - 1)] - rho[(i + 0) + nx*(j - 2)]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; */ //case 5 //// rho0 = rho[(i + 0) + nx*(j - 1)] + 0.5*(rho[(i + 0) + nx*(j - 1)] - rho[(i + 0) + nx*(j - 2)]); // rho0 = rho[i + nx*(j - 1)]; //// rho0 = 1.0; // ru = rho0 * Ux0[j]; //// ru = rho0* 0.005; // // ftemp[i + nx*j + nx*ny * 7] = -(1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 5] = (1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; // // ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //Periodic + Velocity /*ftemp[i + nx*0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i + 1) + nx*0 + nx*ny * 5] = f[i + nx*j + nx*ny * 5]; rho0 = rho[(i + 1) + nx*(j - 1)] + 0.5*(rho[(i + 1) + nx*(j - 1)] - rho[(i + 2) + nx*(j - 2)]); ru = rho0 * Ux0; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru;*/ //wet-node method // rho0 = rho[i + nx*(j - 1)]; //// rho0 = 1.001; //// rho0 = rho[(i + 0) + nx*(j - 1)] + 0.5*(rho[(i + 0) + nx*(j - 1)] - rho[(i + 0) + nx*(j - 2)]); //// rho0 = rho1; // // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; // ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; // ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); // ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; //Equilibrium float c = 1; /*ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) 
* rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4)) * pow(Ux0[j], 2) - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4))*pow(Ux0[j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); */ ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + 
(3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // // ============================================================================ // // BOTTOM-LEFT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == 0) && (j == 0)) { //case 1 // ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; //// rho0 = rho[(i + 1) + nx*(j + 1)] + 0.5*(rho[(i + 1) + nx*(j + 1)] - rho[(i + 2) + nx*(j + 2)]); // rho0 = rho[i + nx*(j + 1)]; // ru = rho0 * Ux0[j]; // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; //case 2 //ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; //ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; //ftemp[i + nx*j + nx*ny * 8] = -ftemp[i + nx*j + nx*ny * 6]; //rho0 = rho[(i + 1) + nx*(j + 1)] + 0.5*(rho[(i + 1) + nx*(j + 1)] - rho[(i + 2) + nx*(j + 2)]); //ru = rho0 * Ux0; //ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; //ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // //ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; */ //case 4 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; rho0 = rho[(i + 0) + nx*(j + 1)] + 0.5*(rho[(i + 0) + nx*(j + 1)] - rho[(i + 0) + nx*(j + 2)]); ru = rho0 * Ux0[j]; ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //case 5 //// rho0 = rho[(i + 0) + nx*(j + 1)] + 0.5*(rho[(i + 0) + nx*(j + 1)] - rho[(i + 0) + nx*(j + 2)]); // rho0 = rho[i + nx*(j + 1)]; //// rho0 = 1.0; // ru = rho0 * Ux0[j]; //// ru = rho0* 0.005; // // ftemp[i + nx*j + nx*ny * 6] = -(1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 8] = (1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; // ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; // // ftemp[i + nx*j + nx*ny * 0] = rho0 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //Periodic + Velocity /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i + 1) + nx*(ny - 1) + nx*ny * 8] = f[i + nx*j + nx*ny * 8]; rho0 = rho[(i + 1) + nx*(j + 1)] + 0.5*(rho[(i + 1) + nx*(j + 1)] - rho[(i + 2) + nx*(j + 2)]); ru = rho0 * Ux0; ftemp[i 
+ nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3] + (2.0 / 3.0)*ru; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //wet-node method // rho0 = rho[i + nx*(j + 1)]; //// rho0 = 1.001; //// rho0 = rho[(i + 0) + nx*(j + 1)] + 0.5*(rho[(i + 0) + nx*(j + 1)] - rho[(i + 0) + nx*(j + 2)]); //// rho0 = rho1; // // ftemp[i + nx*j + nx*ny * 1] = f[i + nx*j + nx*ny * 3]; // ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; // ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; //Equilibrium float c = 1; /*ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4)) * pow(Ux0[j], 2) - (1.5 / pow(c, 2)) * (pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux0[j] + (4.5 / pow(c, 4))*pow(Ux0[j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux0[j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux0[j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux0[j], 2) + pow(Uy[i + nx*j], 2))); */ ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 
9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // // ============================================================================ // // TOP-RIGHT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == nx - 1) && (j == ny - 1)) { //case 1 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; Ux1 = Ux[(i - 1) + nx*(j - 1)] + 0.5*(Ux[(i - 1) + nx*(j - 1)] - Ux[(i - 2) + nx*(j - 2)]); Uy1 = Uy[(i - 1) + nx*(j - 1)] + 0.5*(Uy[(i - 1) + nx*(j - 1)] - Uy[(i - 2) + nx*(j - 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1));*/ //case 2 //ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; //ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; //ftemp[i + nx*j + nx*ny * 6] = -ftemp[i + nx*j + nx*ny * 8]; //Ux1 = Ux[(i - 1) + nx*(j - 1)] + 0.5*(Ux[(i - 1) + nx*(j - 1)] - Ux[(i - 2) + nx*(j - 2)]); //ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); //ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); //ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5];*/ //case 4 /*ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = f[i + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; Ux1 = Ux[(i - 1) + nx*(j - 1)] + 0.5*(Ux[(i - 1) + nx*(j - 1)] - Ux[(i - 2) + nx*(j - 2)]); 
ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); */ //case 5 // Ux1 = Ux[(i - 1) + nx*(j - 0)]; //// rho_extra = rho[(i - 1) + nx*(j - 1)] + 0.5*(rho[(i - 1) + nx*(j - 1)] - rho[(i - 2) + nx*(j - 2)]); // rho_extra = rho[(i - 1) + nx*(j - 0)]; // ru = rho_extra * Ux1; // // ftemp[i + nx*j + nx*ny * 8] = (1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 6] = -(1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0) * ru; // ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0) * ru; // // ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //Periodic + Bounce back /*ftemp[i + nx*0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i - 1) + nx*0 + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; Ux1 = Ux[(i - 1) + nx*(j - 1)]; Uy1 = Uy[(i - 1) + nx*(j - 1)]; ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); */ //Periodic + Extrapolation /*ftemp[i + nx * 0 + nx*ny * 2] = f[i + nx*j + nx*ny * 2]; ftemp[(i - 1) + nx * 0 + nx*ny * 6] = f[i + nx*j + nx*ny * 6]; */ //wet-node method //// rho0 = rho[(i - 1) + nx*(j - 0)]; // rho0 = rho1; //// rho0 = rho[(i - 0) + nx*(j - 1)] + 0.5*(rho[(i - 0) + nx*(j - 1)] - rho[(i - 0) + nx*(j - 2)]); // // ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; // ftemp[i + nx*j + nx*ny * 4] = f[i + nx*j + nx*ny * 2]; // ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5]; // ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); // ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; //Equilibrium float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + 
nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // // ============================================================================ // // BOTTOM-RIGHT CORNER (EQUILIBRIUM) // ============================================================================ // if ((i == nx - 1) && (j == 0)) { //case 1 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; Ux1 = Ux[(i - 1) + nx*(j + 1)] + 0.5*(Ux[(i - 1) + nx*(j + 1)] - Ux[(i - 2) + nx*(j + 2)]); Uy1 = Uy[(i - 1) + nx*(j + 1)] + 0.5*(Uy[(i - 1) + nx*(j + 1)] - Uy[(i - 2) + nx*(j + 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); */ //case 2 //ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; //ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; //ftemp[i + nx*j + nx*ny * 7] = -ftemp[i + nx*j + nx*ny * 5]; //Ux1 = Ux[(i - 1) + nx*(j + 1)] + 0.5*(Ux[(i - 1) + nx*(j + 1)] - Ux[(i - 2) + nx*(j + 2)]); //ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); //ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); // //ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); //case 3 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 7] = f[i + nx*j + nx*ny * 5];*/ //case 4 /*ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = f[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; Ux1 = Ux[(i - 1) + nx*(j + 1)] + 0.5*(Ux[(i - 1) + nx*(j + 1)] - Ux[(i - 2) + nx*(j + 2)]); ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + 3.0 * Ux1*Ux1); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + 3.0 * 
Ux1*Ux1); */ //case 5 // Ux1 = Ux[(i - 1) + nx*(j + 0)]; //// rho_extra = rho[(i - 1) + nx*(j + 1)] + 0.5*(rho[(i - 1) + nx*(j + 1)] - rho[(i - 2) + nx*(j + 2)]); // rho_extra = rho[(i - 1) + nx*(j + 0)]; // // ru = Ux1 * rho_extra; // // ftemp[i + nx*j + nx*ny * 5] = (1.0 / 12.0) * ru; // ftemp[i + nx*j + nx*ny * 7] = -(1.0 / 12.0) * ru; // // ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1] - (2.0 / 3.0) * ru; // ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0) * ru; // // ftemp[i + nx*j + nx*ny * 0] = rho1 - (ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] // + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]); // //Periodic + Bounce back /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i - 1) + nx*(ny - 1) + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; Ux1 = Ux[(i - 1) + nx*(j + 1)]; Uy1 = Uy[(i - 1) + nx*(j + 1)]; ftemp[i + nx*j + nx*ny * 3] = -f[i + nx*j + nx*ny * 1] + (2.0 / 9.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 6] = -f[i + nx*j + nx*ny * 8] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 - Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1)); ftemp[i + nx*j + nx*ny * 7] = -f[i + nx*j + nx*ny * 5] + (1.0 / 18.0) * rho1 * (1.0 + (9.0 / 2.0)*pow(Ux1 + Uy1, 2) - (3.0 / 2.0)*(Ux1*Ux1 + Uy1*Uy1));*/ //Periodic + Extrapolation /*ftemp[i + nx*(ny - 1) + nx*ny * 4] = f[i + nx*j + nx*ny * 4]; ftemp[(i - 1) + nx*(ny - 1) + nx*ny * 7] = f[i + nx*j + nx*ny * 7]; */ //wet-node method //// rho0 = rho[(i - 1) + nx*(j + 0)]; // rho0 = rho1; //// rho0 = rho[(i - 0) + nx*(j + 1)] + 0.5*(rho[(i - 0) + nx*(j + 1)] - rho[(i - 0) + nx*(j + 2)]); // ftemp[i + nx*j + nx*ny * 2] = f[i + nx*j + nx*ny * 4]; // ftemp[i + nx*j + nx*ny * 6] = f[i + nx*j + nx*ny * 8]; // ftemp[i + nx*j + nx*ny * 3] = f[i + nx*j + nx*ny * 1]; // ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho0 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] // + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); // ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; //Equilibrium float c = 1; ftemp[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / 
pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); ftemp[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } // ============================================================================ // } void LBM_GPU::BC_bounceback() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_BC_bounceback << < dimGrid, dimBlock >> > (d_f, d_ftemp, d_rho, d_Ux, d_Uy, d_Ux0, rho1, nx, ny, a); } __global__ void Kernel_BC_extra(float* ftemp, float* Ux, float* rho, int nx, int ny, int a, float rho1) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; float ru, Ux_extra, rho_extra; // ============================================================================ // // TOP-LEFT CORNER (VELOCITY & PERIODIC) // ============================================================================ // if ((i == 0) && (j == ny - 1)) { //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 0) + nx*(j - 1) + nx*ny * 8];*/ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 0] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 0] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 2] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 2] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 3] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 4] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + 
nx*ny * 6] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 7] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 1) + nx*(j - 1) + nx*ny * 8] + 0.5 * (ftemp[(i + 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 8]); */ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 1] - ftemp[i + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 5] - ftemp[i + nx*(j - 2) + nx*ny * 5]); rho_extra = rho[i + nx*(j - 1)] + 0.5 * (rho[i + nx*(j - 1)] - rho[i + nx*(j - 2)]); Ux_extra = Ux[i + nx*(j - 1)] + 0.5 * (Ux[i + nx*(j - 1)] - Ux[i + nx*(j - 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 1] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 4] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i + 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j - 2) + nx*ny * 8]; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; } // ============================================================================ // // ============================================================================ // // BOTTOM-LEFT CORNER (VELOCITY & PERIODIC) // ============================================================================ // if ((i == 0) && (j == 0)) { //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 0) + nx*(j + 1) + nx*ny * 8];*/ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 0] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 0] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 1] 
- ftemp[(i + 2) + nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 2] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 3] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 4] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 4] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 5] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 6] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 7] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i + 1) + nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[(i + 1) + nx*(j + 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 8]); */ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 1] = ftemp[i+ nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[i+ nx*(j + 1) + nx*ny * 1] - ftemp[i+ nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 8] = ftemp[i+ nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[i+ nx*(j + 1) + nx*ny * 8] - ftemp[i+ nx*(j + 2) + nx*ny * 8]); rho_extra = rho[i + nx*(j + 1)] + 0.5 * (rho[i + nx*(j + 1)] - rho[i + nx*(j + 2)]); Ux_extra = Ux[i + nx*(j + 1)] + 0.5 * (Ux[i + nx*(j + 1)] - Ux[i + nx*(j + 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru; */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 1] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 1] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 8] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 2] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i + 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i + 2) + nx*(j + 2) + nx*ny * 6]; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 1] = ftemp[i + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; } // ============================================================================ // // ============================================================================ // // TOP-RIGHT CORNER (EXTRAPOLATION & PERIODIC) // ============================================================================ // if ((i == nx - 1) && (j == ny - 1)) { //Extrapolation /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 0] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 0] - ftemp[(i - 2) + nx*(j - 2) + 
nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 1] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 1] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 2] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 2] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 4] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 5] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 5] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 7] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j - 1) + nx*ny * 8] + 0.5 * (ftemp[(i - 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 8]);*/ //Extrapolation high order /*ftemp[i + nx*j + nx*ny * 3] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 3] + ftemp[(i - 3) + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 6] + ftemp[(i - 3) + nx*j + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 7] + ftemp[(i - 3) + nx*j + nx*ny * 7]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 4] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 4] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 8] = 2.0*ftemp[(i - 1) + nx*(j - 1) + nx*ny * 8] - ftemp[(i - 2) + nx*(j - 2) + nx*ny * 8]; */ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7]; */ //Extrapolation second order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7];*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 6]; 
ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j - 0) + nx*ny * 8];*/ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j - 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 3] - ftemp[i + nx*(j - 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*(j - 1) + nx*ny * 6] + 0.5 * (ftemp[i + nx*(j - 1) + nx*ny * 6] - ftemp[i + nx*(j - 2) + nx*ny * 6]); rho_extra = rho[i + nx*(j - 1)] + 0.5 * (rho[i + nx*(j - 1)] - rho[i + nx*(j - 2)]); Ux_extra = Ux[i + nx*(j - 1)] + 0.5 * (Ux[i + nx*(j - 1)] - Ux[i + nx*(j - 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5] - (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6] + (1.0 / 6.0)*ru; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 4] = ftemp[i + nx*j + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 7])); ftemp[i + nx*j + nx*ny * 8] = ftemp[i + nx*j + nx*ny * 6]; } // ============================================================================ // // ============================================================================ // // BOTTOM-RIGHT CORNER (EXTRAPOLATION & PERIODIC) // ============================================================================ // if ((i == nx - 1) && (j == 0)) { //Extrapolation /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 0] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 0] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 0]); ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 1] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 1] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 1]); ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 2] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 2]); ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 4] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 4] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 4]); ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 5] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 5]); ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 6] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 7]); ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j + 1) + nx*ny * 8] + 0.5 * (ftemp[(i - 1) + nx*(j + 1) + nx*ny * 8] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 8]); */ //Extrapolation high order /* ftemp[i + nx*j + nx*ny * 3] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 3] + ftemp[(i - 3) + nx*j + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 6] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 6] + ftemp[(i - 3) + nx*j + 
nx*ny * 6]); ftemp[i + nx*j + nx*ny * 7] = (1.0 / 3.0) * (7.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - 5.0*ftemp[(i - 2) + nx*j + nx*ny * 7] + ftemp[(i - 3) + nx*j + nx*ny * 7]); */ //Extrapolation 2nd order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 3] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 6] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 7] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 2] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 2] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 5] = 2.0*ftemp[(i - 1) + nx*(j + 1) + nx*ny * 5] - ftemp[(i - 2) + nx*(j + 2) + nx*ny * 5];*/ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*j + nx*ny * 7];*/ //Extrapolation second order /*ftemp[i + nx*j + nx*ny * 3] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 3] - ftemp[(i - 2) + nx*j + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 6] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 6] - ftemp[(i - 2) + nx*j + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = 2.0*ftemp[(i - 1) + nx*j + nx*ny * 7] - ftemp[(i - 2) + nx*j + nx*ny * 7]; */ //Extrapolation first order /*ftemp[i + nx*j + nx*ny * 0] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 0]; ftemp[i + nx*j + nx*ny * 1] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 2] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 2]; ftemp[i + nx*j + nx*ny * 3] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 3]; ftemp[i + nx*j + nx*ny * 4] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 5]; ftemp[i + nx*j + nx*ny * 6] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 6]; ftemp[i + nx*j + nx*ny * 7] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 7]; ftemp[i + nx*j + nx*ny * 8] = ftemp[(i - 1) + nx*(j + 0) + nx*ny * 8]; */ //Extrapolation 2nd order + moving wall /*ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*(j + 1) + nx*ny * 3] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 3] - ftemp[i + nx*(j + 2) + nx*ny * 3]); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*(j + 1) + nx*ny * 7] + 0.5 * (ftemp[i + nx*(j + 1) + nx*ny * 7] - ftemp[i + nx*(j + 2) + nx*ny * 7]); rho_extra = rho[i + nx*(j + 1)] + 0.5 * (rho[i + nx*(j + 1)] - rho[i + nx*(j + 2)]); Ux_extra = Ux[i + nx*(j + 1)] + 0.5 * (Ux[i + nx*(j + 1)] - Ux[i + nx*(j + 2)]); ru = rho_extra*Ux_extra; ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 5] = ftemp[i + nx*j + nx*ny * 7] + (1.0 / 6.0)*ru; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8] - (1.0 / 6.0)*ru; */ //Zou - He boundary ftemp[i + nx*j + nx*ny * 2] = ftemp[i + nx*j + nx*ny * 4]; ftemp[i + nx*j + nx*ny * 6] = ftemp[i + nx*j + nx*ny * 8]; ftemp[i + nx*j + nx*ny * 3] = ftemp[i + nx*j + nx*ny * 1]; ftemp[i + nx*j + nx*ny * 5] = 0.5 * (rho1 - (ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 8])); ftemp[i + nx*j + nx*ny * 7] = ftemp[i + nx*j + nx*ny * 5]; } // ============================================================================ // } void LBM_GPU::BC_extra() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, 
(a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_BC_extra << < dimGrid, dimBlock >> > (d_ftemp, d_Ux, d_rho, nx, ny, a, rho1); } __global__ void Kernel_Eq(float* ftemp, float* feq, float* Ux, float* Uy, float* rho, float* ex, float* ey, int nx, int ny, int a, int* is_solid_node, float c) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; //Calculation of Macroscopic var if (!is_solid_node[i + nx*j]){ rho[i + nx*j] = ftemp[i + nx*j + nx*ny * 0] + ftemp[i + nx*j + nx*ny * 1] + ftemp[i + nx*j + nx*ny * 2] + ftemp[i + nx*j + nx*ny * 3] + ftemp[i + nx*j + nx*ny * 4] + ftemp[i + nx*j + nx*ny * 5] + ftemp[i + nx*j + nx*ny * 6] + ftemp[i + nx*j + nx*ny * 7] + ftemp[i + nx*j + nx*ny * 8]; Ux[i + nx*j] = ftemp[i + nx*j + nx*ny * 1] * ex[1] + ftemp[i + nx*j + nx*ny * 3] * ex[3] + ftemp[i + nx*j + nx*ny * 5] * ex[5] + ftemp[i + nx*j + nx*ny * 6] * ex[6] + ftemp[i + nx*j + nx*ny * 7] * ex[7] + ftemp[i + nx*j + nx*ny * 8] * ex[8]; Uy[i + nx*j] = ftemp[i + nx*j + nx*ny * 2] * ey[2] + ftemp[i + nx*j + nx*ny * 4] * ey[4] + ftemp[i + nx*j + nx*ny * 5] * ey[5] + ftemp[i + nx*j + nx*ny * 6] * ey[6] + ftemp[i + nx*j + nx*ny * 7] * ey[7] + ftemp[i + nx*j + nx*ny * 8] * ey[8]; Ux[i + nx*j] /= rho[i + nx*j]; Uy[i + nx*j] /= rho[i + nx*j]; feq[i + nx*j + nx*ny * 0] = (4.0 / 9.0) * rho[i + nx*j] * (1.0 - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 1] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4)) * pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2)) * (pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 2] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 3] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Ux[i + nx*j] + (4.5 / pow(c, 4))*pow(Ux[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 4] = (1.0 / 9.0) * rho[i + nx*j] * (1.0 - (3.0 / pow(c, 2)) * Uy[i + nx*j] + (4.5 / pow(c, 4))*pow(Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 5] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 6] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] + Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] + Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 7] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (-Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(-Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); feq[i + nx*j + nx*ny * 8] = (1.0 / 36.0) * rho[i + nx*j] * (1.0 + (3.0 / pow(c, 2)) * (Ux[i + nx*j] - Uy[i + nx*j]) + (4.5 / pow(c, 4))*pow(Ux[i + nx*j] - Uy[i + nx*j], 2) - (1.5 / pow(c, 2))*(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2))); } } __global__ void Kernel_Collision(float* fN, float* ftemp, float* feq, int nx, int ny, int a, float tau, int* is_solid_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + 
threadIdx.z; if (i >= nx || j >= ny || k >= a) return; if (!is_solid_node[i + nx*j]) { fN[i + nx*j + nx*ny*k] = ftemp[i + nx*j + nx*ny*k] - (ftemp[i + nx*j + nx*ny*k] - feq[i + nx*j + nx*ny*k]) / tau; } } void LBM_GPU::Collision() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Eq << < dimGrid, dimBlock >> > (d_ftemp, d_feq, d_Ux, d_Uy, d_rho, d_ex, d_ey, nx, ny, a, d_is_solid_node, c); Kernel_Collision << < dimGrid, dimBlock >> > (d_fN, d_ftemp, d_feq, nx, ny, a, tau, d_is_solid_node); } __global__ void Kernel_Error(float* f, float* Ux, float* Uy, float* U, float* rho, float* fN, float* UxN, float* UyN, float* UN, float* rhoN, float* ex, float* ey, int nx, int ny, int a, int* is_solid_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; if (!is_solid_node[i + nx*j]) { rho[i + nx*j] = f[i + nx*j + nx*ny * 0] + f[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 8]; Ux[i + nx*j] = f[i + nx*j + nx*ny * 1] * ex[1] + f[i + nx*j + nx*ny * 3] * ex[3] + f[i + nx*j + nx*ny * 5] * ex[5] + f[i + nx*j + nx*ny * 6] * ex[6] + f[i + nx*j + nx*ny * 7] * ex[7] + f[i + nx*j + nx*ny * 8] * ex[8]; Uy[i + nx*j] = f[i + nx*j + nx*ny * 2] * ey[2] + f[i + nx*j + nx*ny * 4] * ey[4] + f[i + nx*j + nx*ny * 5] * ey[5] + f[i + nx*j + nx*ny * 6] * ey[6] + f[i + nx*j + nx*ny * 7] * ey[7] + f[i + nx*j + nx*ny * 8] * ey[8]; Ux[i + nx*j] /= rho[i + nx*j]; Uy[i + nx*j] /= rho[i + nx*j]; U[i + nx*j] = sqrt(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)); rhoN[i + nx*j] = fN[i + nx*j + nx*ny * 0] + fN[i + nx*j + nx*ny * 1] + fN[i + nx*j + nx*ny * 2] + fN[i + nx*j + nx*ny * 3] + fN[i + nx*j + nx*ny * 4] + fN[i + nx*j + nx*ny * 5] + fN[i + nx*j + nx*ny * 6] + fN[i + nx*j + nx*ny * 7] + fN[i + nx*j + nx*ny * 8]; UxN[i + nx*j] = fN[i + nx*j + nx*ny * 1] * ex[1] + fN[i + nx*j + nx*ny * 3] * ex[3] + fN[i + nx*j + nx*ny * 5] * ex[5] + fN[i + nx*j + nx*ny * 6] * ex[6] + fN[i + nx*j + nx*ny * 7] * ex[7] + fN[i + nx*j + nx*ny * 8] * ex[8]; UyN[i + nx*j] = fN[i + nx*j + nx*ny * 2] * ey[2] + fN[i + nx*j + nx*ny * 4] * ey[4] + fN[i + nx*j + nx*ny * 5] * ey[5] + fN[i + nx*j + nx*ny * 6] * ey[6] + fN[i + nx*j + nx*ny * 7] * ey[7] + fN[i + nx*j + nx*ny * 8] * ey[8]; UxN[i + nx*j] /= rhoN[i + nx*j]; UyN[i + nx*j] /= rhoN[i + nx*j]; UN[i + nx*j] = sqrt(pow(UxN[i + nx*j], 2) + pow(UyN[i + nx*j], 2)); } } void LBM_GPU::Error() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Error << < dimGrid, dimBlock >> > (d_f, d_Ux, d_Uy, d_U, d_rho, d_fN, d_UxN, d_UyN, d_UN, d_rhoN, d_ex, d_ey, nx, ny, a, d_is_solid_node); cudaMemcpy(U, d_U, nx*ny * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(UN, d_UN, nx*ny * sizeof(float), cudaMemcpyDeviceToHost); sum = 0.0; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { if (!is_solid_node[i + nx*j]) { sum = sum + pow(abs(UN[i + nx*j] - U[i + nx*j]), 2); } } } error = sqrt(sum / (nx*ny - sn)); } __global__ void Kernel_Update(float* fN, float* f, float* Ux, float* Uy, float* U, float* rho, float* ex, float* ey, int nx, int ny, int 
a, int* is_solid_node) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if (i >= nx || j >= ny || k >= a) return; if(!is_solid_node[i + nx*j]) f[i + nx*j + nx*ny*k] = fN[i + nx*j + nx*ny*k]; rho[i + nx*j] = f[i + nx*j + nx*ny * 0] + f[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 8]; Ux[i + nx*j] = f[i + nx*j + nx*ny * 1] * ex[1] + f[i + nx*j + nx*ny * 3] * ex[3] + f[i + nx*j + nx*ny * 5] * ex[5] + f[i + nx*j + nx*ny * 6] * ex[6] + f[i + nx*j + nx*ny * 7] * ex[7] + f[i + nx*j + nx*ny * 8] * ex[8]; Uy[i + nx*j] = f[i + nx*j + nx*ny * 2] * ey[2] + f[i + nx*j + nx*ny * 4] * ey[4] + f[i + nx*j + nx*ny * 5] * ey[5] + f[i + nx*j + nx*ny * 6] * ey[6] + f[i + nx*j + nx*ny * 7] * ey[7] + f[i + nx*j + nx*ny * 8] * ey[8]; Ux[i + nx*j] /= rho[i + nx*j]; Uy[i + nx*j] /= rho[i + nx*j]; U[i + nx*j] = sqrt(pow(Ux[i + nx*j], 2) + pow(Uy[i + nx*j], 2)); } void LBM_GPU::Update() { dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z); dim3 dimGrid((nx + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X, (ny + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y, (a + BLOCK_SIZE_Z - 1) / BLOCK_SIZE_Z); Kernel_Update << < dimGrid, dimBlock >> > (d_fN, d_f, d_Ux, d_Uy, d_U, d_rho, d_ex, d_ey, nx, ny, a, d_is_solid_node); } void LBM_GPU::Momentum() { cudaMemcpy(f, d_f, nx*ny*a * sizeof(float), cudaMemcpyDeviceToHost); sum_Fx1 = 0.0; sum_Fx3 = 0.0; sum_Fx5 = 0.0; sum_Fx6 = 0.0; sum_Fx7 = 0.0; sum_Fx8 = 0.0; sum_Fy2 = 0.0; sum_Fy4 = 0.0; sum_Fy5 = 0.0; sum_Fy6 = 0.0; sum_Fy7 = 0.0; sum_Fy8 = 0.0; sum_Fx = 0.0; sum_Fy = 0.0; Fx = 0.0; Fy = 0.0; Cd = 0.0; Cl = 0.0; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { sum_Fx1 = 0.0; sum_Fx3 = 0.0; sum_Fx5 = 0.0; sum_Fx6 = 0.0; sum_Fx7 = 0.0; sum_Fx8 = 0.0; sum_Fy2 = 0.0; sum_Fy4 = 0.0; sum_Fy5 = 0.0; sum_Fy6 = 0.0; sum_Fy7 = 0.0; sum_Fy8 = 0.0; sum_Fx = 0.0; sum_Fy = 0.0; in = i - 1; ip = i + 1; jn = j - 1; jp = j + 1; dist = sqrt(pow((float)i - ic, 2) + pow((float)j - jc, 2)); q = dist - r; if (!is_boundary_node[i + nx*j]) { if (!is_solid_node[i + nx*j]) { if (is_solid_near_node[i + nx*j]) { if (is_solid_node[ip + nx*j]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 3] = 2.0 * q * f[i + nx*j + nx*ny * 1] + (1.0 - 2.0*q)*f[(i - 1) + nx*j + nx*ny * 1]; else ftemp[i + nx*j + nx*ny * 3] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 1] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 3]; sum_Fx1 = ex[1] * (ftemp[i + nx*j + nx*ny * 3] + f[i + nx*j + nx*ny * 1]); } if (is_solid_node[i + nx*jp]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 4] = 2.0 * q * f[i + nx*j + nx*ny * 2] + (1.0 - 2.0*q)*f[i + nx*(j - 1) + nx*ny * 2]; else ftemp[i + nx*j + nx*ny * 4] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 2] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 4]; sum_Fy2 = ey[2] * (ftemp[i + nx*j + nx*ny * 4] + f[i + nx*j + nx*ny * 2]); } if (is_solid_node[in + nx*j]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 1] = 2.0 * q * f[i + nx*j + nx*ny * 3] + (1.0 - 2.0*q)*f[(i + 1) + nx*j + nx*ny * 3]; else ftemp[i + nx*j + nx*ny * 1] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 3] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 1]; sum_Fx3 = ex[3] * (ftemp[i + nx*j + nx*ny * 1] + f[i + nx*j + nx*ny * 3]); } if (is_solid_node[i + nx*jn]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 2] = 2.0 * q * f[i + nx*j + nx*ny * 4] + (1.0 - 2.0*q)*f[i + nx*(j + 1) + nx*ny * 4]; else ftemp[i + nx*j + nx*ny * 2] = (1.0 / (2.0*q))*f[i 
+ nx*j + nx*ny * 4] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 2]; sum_Fy4 = ey[4] * (ftemp[i + nx*j + nx*ny * 2] + f[i + nx*j + nx*ny * 4]); } if (is_solid_node[ip + nx*jp]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 7] = 2.0 * q * f[i + nx*j + nx*ny * 5] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j - 1) + nx*ny * 5]; else ftemp[i + nx*j + nx*ny * 7] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 5] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 7]; sum_Fx5 = ex[5] * (ftemp[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 5]); sum_Fy5 = ey[5] * (ftemp[i + nx*j + nx*ny * 7] + f[i + nx*j + nx*ny * 5]); } if (is_solid_node[in + nx*jp]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 8] = 2.0 * q * f[i + nx*j + nx*ny * 6] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j - 1) + nx*ny * 6]; else ftemp[i + nx*j + nx*ny * 8] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 6] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 8]; sum_Fx6 = ex[6] * (ftemp[i + nx*j + nx*ny * 8] + f[i + nx*j + nx*ny * 6]); sum_Fy6 = ey[6] * (ftemp[i + nx*j + nx*ny * 8] + f[i + nx*j + nx*ny * 6]); } if (is_solid_node[in + nx*jn]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 5] = 2.0 * q * f[i + nx*j + nx*ny * 7] + (1.0 - 2.0*q)*f[(i + 1) + nx*(j + 1) + nx*ny * 7]; else ftemp[i + nx*j + nx*ny * 5] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 7] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 5]; sum_Fx7 = ex[7] * (ftemp[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 7]); sum_Fy7 = ey[7] * (ftemp[i + nx*j + nx*ny * 5] + f[i + nx*j + nx*ny * 7]); } if (is_solid_node[ip + nx*jn]) { if (q < 0.5) ftemp[i + nx*j + nx*ny * 6] = 2.0 * q * f[i + nx*j + nx*ny * 8] + (1.0 - 2.0*q)*f[(i - 1) + nx*(j + 1) + nx*ny * 8]; else ftemp[i + nx*j + nx*ny * 6] = (1.0 / (2.0*q))*f[i + nx*j + nx*ny * 8] + (2.0*q - 1.0) / (2.0*q)*f[i + nx*j + nx*ny * 6]; sum_Fx8 = ex[8] * (ftemp[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 8]); sum_Fy8 = ey[8] * (ftemp[i + nx*j + nx*ny * 6] + f[i + nx*j + nx*ny * 8]); } sum_Fx = sum_Fx1 + sum_Fx3 + sum_Fx5 + sum_Fx6 + sum_Fx7 + sum_Fx8; sum_Fy = sum_Fy2 + sum_Fy4 + sum_Fy5 + sum_Fy6 + sum_Fy7 + sum_Fy8; Fx = Fx + sum_Fx; Fy = Fy + sum_Fy; } } } } } Cd = 2.0*Fx / (rho1*pow((2.0 / 3.0)*Um, 2)*snx); Cl = 2.0*Fy / (rho1*pow((2.0 / 3.0)*Um, 2)*sny); fout_GPU_Cd << Cd << "\t" << Cl << endl; } void LBM_GPU::Print() { cudaMemcpy(Ux, d_Ux, nx*ny * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(Uy, d_Uy, nx*ny * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(U, d_U, nx*ny * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(rho, d_rho, nx*ny * sizeof(float), cudaMemcpyDeviceToHost); // ============================================================================ // // CHANGE LBM -> PHYSICAL // ============================================================================ // for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { Ux_p[i + nx*j] = Ux[i + nx*j]; Uy_p[i + nx*j] = Uy[i + nx*j]; U_p[i + nx*j] = U[i + nx*j]; P[i + nx*j] = rho[i + nx*j] / (3.0); } } // ============================================================================ // fout_GPU << endl; fout_GPU << "variables = X Y Ux Uy U rho P" << endl; fout_GPU << "zone i=" << nx << " j=" << ny << endl; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { fout_GPU << i << "\t" << j << "\t" << Ux[i + nx*j] << "\t" << Uy[i + nx*j] << "\t" << U[i + nx*j] << "\t" << rho[i + nx*j] << "\t" << P[i + nx*j] << endl; } } fout_GPU << endl; i = 0; fout_GPU_Ux << "variables = X Y Ux" << endl; fout_GPU_Ux << "zone i=" << nx << " j=" << ny << endl; for (j = 0; j < ny; j++) { fout_GPU_Ux << i << "\t" << j << "\t" << Ux[i + nx*j] << endl; } 
fout_GPU_Ux << endl; } LBM_GPU::~LBM_GPU() { cudaFree(d_Ux0); cudaFree(d_is_boundary_node); cudaFree(d_is_solid_node); cudaFree(d_f); cudaFree(d_fN); cudaFree(d_ftemp); cudaFree(d_feq); cudaFree(d_Ux); cudaFree(d_Uy); cudaFree(d_rho); cudaFree(d_ex); cudaFree(d_ey); cudaFree(d_U); cudaFree(d_UN); cudaFree(d_UxN); cudaFree(d_UyN); cudaFree(rhoN); delete[] Ux0; delete[] Ux0_p; delete[] P; delete[] Uy_p; delete[] Ux_p; delete[] U_p; delete[] ey; delete[] ex; delete[] fN; delete[] feq; delete[] ftemp; delete[] f; delete[] rhoN; delete[] UyN; delete[] UxN; delete[] UN; delete[] rho; delete[] Uy; delete[] Ux; delete[] U; delete[] is_boundary_node; delete[] is_solid_node; delete[] is_solid_near_node; cout << endl << "Done!" << endl; }
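// Illustrative sketch, not part of the file pair above. In Kernel_Update each
// (i, j, k) thread copies fN into f for its own direction k and then immediately
// sums f over all nine directions of the same cell; threads handling the other
// directions (possibly in other blocks) may not have written f yet, so the moment
// sums can read stale values, and every k thread rewrites rho/Ux/Uy redundantly.
// A minimal separate 2D moment kernel avoids this; array names follow the code
// above, but the kernel itself is an assumption, not the original implementation.
__global__ void ComputeMomentsD2Q9(const float *f, float *rho, float *Ux, float *Uy,
                                   float *U, const float *ex, const float *ey,
                                   int nx, int ny)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= nx || j >= ny) return;
    float r = 0.0f, ux = 0.0f, uy = 0.0f;
    for (int k = 0; k < 9; k++) {                     // nine D2Q9 directions
        float fk = f[i + nx*j + nx*ny*k];
        r  += fk;
        ux += fk * ex[k];
        uy += fk * ey[k];
    }
    rho[i + nx*j] = r;
    Ux[i + nx*j]  = ux / r;
    Uy[i + nx*j]  = uy / r;
    U[i + nx*j]   = sqrtf(ux*ux + uy*uy) / r;         // speed magnitude
}
// Run after the f = fN copy kernel has completed so the per-cell sums see a
// consistent distribution field.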
5695301512fbdb0aa1b4443e7efbf00711529ddf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <iomanip> #include <iostream> #include <math.h> #include <sstream> #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include "utils.h" #define MAX_THREADS_PER_BLOCK 512 // NOTE "scan" and "prefix-sum" (psum) are used interchangably in this context __global__ void block_psum(const unsigned int * const g_in, unsigned int * const g_out, unsigned int * const g_sums, const size_t n) { extern __shared__ unsigned int smem[]; const size_t bx = blockIdx.x * blockDim.x; const size_t tx = threadIdx.x; const size_t px = bx + tx; int offset = 1; // init smem[2*tx] = g_in[2*px]; smem[2*tx+1] = g_in[2*px+1]; //// // up sweep //// for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (tx < d) { int ai = offset * (2*tx+1) - 1; int bi = offset * (2*tx+2) - 1; smem[bi] += smem[ai]; } offset <<= 1; } // save block sum and clear last element if (tx == 0) { if (g_sums != NULL) g_sums[blockIdx.x] = smem[n-1]; smem[n-1] = 0; } //// // down sweep //// for (int d = 1; d < n; d <<= 1) { offset >>= 1; __syncthreads(); if (tx < d) { int ai = offset * (2*tx+1) - 1; int bi = offset * (2*tx+2) - 1; // swap unsigned int t = smem[ai]; smem[ai] = smem[bi]; smem[bi] += t; } } __syncthreads(); // save scan result g_out[2*px] = smem[2*tx]; g_out[2*px+1] = smem[2*tx+1]; } __global__ void scatter_incr( unsigned int * const d_array, const unsigned int * const d_incr) { const size_t bx = 2 * blockDim.x * blockIdx.x; const size_t tx = threadIdx.x; const unsigned int u = d_incr[blockIdx.x]; d_array[bx + 2*tx] += u; d_array[bx + 2*tx+1] += u; } // TODO 1) current version only works for len <= MAX_THREADS_PER_BLOCK * MAX_THREADS_PER_BLOCK // TODO 2) current version doesnt handle bank conflicts void psum(const unsigned int * const d_in, unsigned int * const d_out, const size_t len) { const unsigned int nthreads = MAX_THREADS_PER_BLOCK; const unsigned int block_size = 2 * nthreads; const unsigned int smem = block_size * sizeof(unsigned int); // n = smallest multiple of block_size such that larger than or equal to len const size_t n = len % block_size == 0 ? 
len : (1+len/block_size)*block_size; // number of blocks int nblocks = n/block_size; // allocate memories on gpu unsigned int *d_scan, *d_sums, *d_incr; checkCudaErrors(hipMalloc(&d_scan, sizeof(unsigned int)*n)); checkCudaErrors(hipMalloc(&d_sums, sizeof(unsigned int)*nblocks)); checkCudaErrors(hipMalloc(&d_incr, sizeof(unsigned int)*nblocks)); // scan array by blocks (block_size = 2 * num threads) hipLaunchKernelGGL(( block_psum), dim3(nblocks), dim3(nthreads), smem, 0, d_in, d_scan, d_sums, block_size); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // scan block sums // TODO case when nblocks is bigger than block_size (see TODO 1) hipLaunchKernelGGL(( block_psum), dim3(1), dim3(nthreads), smem, 0, d_sums, d_incr, NULL, block_size); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // scatter block sums back to scanned blocks hipLaunchKernelGGL(( scatter_incr), dim3(nblocks), dim3(nthreads), 0, 0, d_scan, d_incr); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // copy scan result back to d_out (cutoff at length len) checkCudaErrors(hipMemcpy(d_out, d_scan, sizeof(unsigned int)*len, hipMemcpyDeviceToDevice)); // free allocated memories checkCudaErrors(hipFree(d_incr)); checkCudaErrors(hipFree(d_sums)); checkCudaErrors(hipFree(d_scan)); } __global__ void histo_01(const unsigned int * const d_data, unsigned int * const d_histo, const unsigned int bit, const size_t len) { int px = blockDim.x * blockIdx.x + threadIdx.x; if (px >= len) return; const unsigned int bin = (d_data[px] >> bit) & 1U; atomicAdd(&(d_histo[bin]), 1); } __global__ void map_ones(const unsigned int * const d_data, unsigned int * const d_ones, const unsigned int bit, const size_t len) { const unsigned int px = blockDim.x * blockIdx.x + threadIdx.x; if (px >= len) return; d_ones[px] = (d_data[px] >> bit) & 1U; // =1 if d_data[px] at bit position is 1, =0 otherwise } __global__ void flip_01( unsigned int * const d_bits, const size_t len) { const unsigned int px = blockDim.x * blockIdx.x + threadIdx.x; if (px >= len) return; d_bits[px] ^= 1U; // toggle 0(1) to 1(0) } __global__ void permute(const unsigned int * const d_in, unsigned int * const d_out, const unsigned int * const d_zeros, const unsigned int * const d_scan0, const unsigned int * const d_scan1, const unsigned int * const d_h01, const size_t len) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; unsigned int pos = (d_zeros[idx]) ? 
d_scan0[idx] : d_scan1[idx] + d_h01[0]; d_out[pos] = d_in[idx]; } void bitonic_sort( unsigned int * const d_inputVals, unsigned int * const d_inputPos, unsigned int * const d_outputVals, unsigned int * const d_outputPos, const size_t numElems) { unsigned int nthreads = MAX_THREADS_PER_BLOCK; unsigned int nblocks = 1 + numElems/MAX_THREADS_PER_BLOCK; // allocate memories on gpu unsigned int * d_h01; // histo of 0's and 1's checkCudaErrors(hipMalloc(&d_h01, sizeof(unsigned int)*2)); unsigned int * d_p01; // predicate of 0's or 1's checkCudaErrors(hipMalloc(&d_p01, sizeof(unsigned int)*numElems)); unsigned int * d_scan0; // scan of d_p01 when d_p01 is flipped to represent 0's checkCudaErrors(hipMalloc(&d_scan0, sizeof(unsigned int)*numElems)); unsigned int * d_scan1; // scan of d_p01 when d_p01 is flipeed to represent 1's checkCudaErrors(hipMalloc(&d_scan1, sizeof(unsigned int)*numElems)); // ping pong (dummy) pointers unsigned int *d_ping1, *d_pong1, *d_ping2, *d_pong2; // loop from lowest bit to highest bit for (unsigned int bit = 0U; bit < sizeof(unsigned int) * CHAR_BIT ; bit++) { // ping pong input/output pointers (depending on bit is odd/even) d_ping1 = (bit & 1) ? d_outputVals : d_inputVals; d_pong1 = (bit & 1) ? d_inputVals : d_outputVals; d_ping2 = (bit & 1) ? d_outputPos : d_inputPos; d_pong2 = (bit & 1) ? d_inputPos : d_outputPos; // reset histo to zeros at each bin checkCudaErrors(hipMemset(d_h01, 0U, sizeof(unsigned int)*2)); // perform histo at bit position hipLaunchKernelGGL(( histo_01), dim3(nblocks), dim3(nthreads), 0, 0, d_ping1, d_h01, bit, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // map position of ones (at current bit position) to d_p01 hipLaunchKernelGGL(( map_ones), dim3(nblocks), dim3(nthreads), 0, 0, d_ping1, d_p01, bit, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // scan predicate of ones psum(d_p01, d_scan1, numElems); // flip d_p01 to represent predicate of zeros hipLaunchKernelGGL(( flip_01), dim3(nblocks), dim3(nthreads), 0, 0, d_p01, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // scan predicate of zeros psum(d_p01, d_scan0, numElems); // combine above results to get sorted position (wrt current bit position) hipLaunchKernelGGL(( permute), dim3(nblocks), dim3(nthreads), 0, 0, d_ping1, d_pong1, d_p01, d_scan0, d_scan1, d_h01, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( permute), dim3(nblocks), dim3(nthreads), 0, 0, d_ping2, d_pong2, d_p01, d_scan0, d_scan1, d_h01, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } // copy results to out{vals,pos} if numElems is even if (numElems & 1 == 0) { checkCudaErrors(hipMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int)*numElems, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int)*numElems, hipMemcpyDeviceToDevice)); } // gpu clean up checkCudaErrors(hipFree(d_scan1)); checkCudaErrors(hipFree(d_scan0)); checkCudaErrors(hipFree(d_p01)); checkCudaErrors(hipFree(d_h01)); } int main(int argc, char* argv[]) { size_t len = 10; // default len 10 if (argc >= 2) { std::istringstream ss(argv[1]); int tmp; if (ss >> tmp) len = tmp; } thrust::host_vector<unsigned int> h_val(len); thrust::host_vector<unsigned int> h_pos(len); // generate random uints to fill h_val thrust::generate(h_val.begin(), h_val.end(), rand); // set elements of h_pos to 0, 1, 2, ... 
thrust::sequence(h_pos.begin(), h_pos.end()); // make device vectors thrust::device_vector<unsigned int> d_inval(h_val); thrust::device_vector<unsigned int> d_outval(h_val); thrust::device_vector<unsigned int> d_inpos(h_pos); thrust::device_vector<unsigned int> d_outpos(h_pos); // corresponding device pointers thrust::device_ptr<unsigned int> dp_inval = d_inval.data(); thrust::device_ptr<unsigned int> dp_outval = d_outval.data(); thrust::device_ptr<unsigned int> dp_inpos = d_inpos.data(); thrust::device_ptr<unsigned int> dp_outpos = d_outpos.data(); bitonic_sort(thrust::raw_pointer_cast(dp_inval), thrust::raw_pointer_cast(dp_inpos), thrust::raw_pointer_cast(dp_outval), thrust::raw_pointer_cast(dp_outpos), len); // simple output int width = (int)log10(len) + 1; for (size_t i = 0; i < len; i++) std::cout << std::setw(width) << d_outpos[i] << ": " << d_outval[i] << "\n"; return 0; }
5695301512fbdb0aa1b4443e7efbf00711529ddf.cu
#include <algorithm> #include <iomanip> #include <iostream> #include <math.h> #include <sstream> #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include "utils.h" #define MAX_THREADS_PER_BLOCK 512 // NOTE "scan" and "prefix-sum" (psum) are used interchangably in this context __global__ void block_psum(const unsigned int * const g_in, unsigned int * const g_out, unsigned int * const g_sums, const size_t n) { extern __shared__ unsigned int smem[]; const size_t bx = blockIdx.x * blockDim.x; const size_t tx = threadIdx.x; const size_t px = bx + tx; int offset = 1; // init smem[2*tx] = g_in[2*px]; smem[2*tx+1] = g_in[2*px+1]; //// // up sweep //// for (int d = n >> 1; d > 0; d >>= 1) { __syncthreads(); if (tx < d) { int ai = offset * (2*tx+1) - 1; int bi = offset * (2*tx+2) - 1; smem[bi] += smem[ai]; } offset <<= 1; } // save block sum and clear last element if (tx == 0) { if (g_sums != NULL) g_sums[blockIdx.x] = smem[n-1]; smem[n-1] = 0; } //// // down sweep //// for (int d = 1; d < n; d <<= 1) { offset >>= 1; __syncthreads(); if (tx < d) { int ai = offset * (2*tx+1) - 1; int bi = offset * (2*tx+2) - 1; // swap unsigned int t = smem[ai]; smem[ai] = smem[bi]; smem[bi] += t; } } __syncthreads(); // save scan result g_out[2*px] = smem[2*tx]; g_out[2*px+1] = smem[2*tx+1]; } __global__ void scatter_incr( unsigned int * const d_array, const unsigned int * const d_incr) { const size_t bx = 2 * blockDim.x * blockIdx.x; const size_t tx = threadIdx.x; const unsigned int u = d_incr[blockIdx.x]; d_array[bx + 2*tx] += u; d_array[bx + 2*tx+1] += u; } // TODO 1) current version only works for len <= MAX_THREADS_PER_BLOCK * MAX_THREADS_PER_BLOCK // TODO 2) current version doesnt handle bank conflicts void psum(const unsigned int * const d_in, unsigned int * const d_out, const size_t len) { const unsigned int nthreads = MAX_THREADS_PER_BLOCK; const unsigned int block_size = 2 * nthreads; const unsigned int smem = block_size * sizeof(unsigned int); // n = smallest multiple of block_size such that larger than or equal to len const size_t n = len % block_size == 0 ? 
len : (1+len/block_size)*block_size; // number of blocks int nblocks = n/block_size; // allocate memories on gpu unsigned int *d_scan, *d_sums, *d_incr; checkCudaErrors(cudaMalloc(&d_scan, sizeof(unsigned int)*n)); checkCudaErrors(cudaMalloc(&d_sums, sizeof(unsigned int)*nblocks)); checkCudaErrors(cudaMalloc(&d_incr, sizeof(unsigned int)*nblocks)); // scan array by blocks (block_size = 2 * num threads) block_psum<<<nblocks, nthreads, smem>>>(d_in, d_scan, d_sums, block_size); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // scan block sums // TODO case when nblocks is bigger than block_size (see TODO 1) block_psum<<<1, nthreads, smem>>>(d_sums, d_incr, NULL, block_size); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // scatter block sums back to scanned blocks scatter_incr<<<nblocks, nthreads>>>(d_scan, d_incr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // copy scan result back to d_out (cutoff at length len) checkCudaErrors(cudaMemcpy(d_out, d_scan, sizeof(unsigned int)*len, cudaMemcpyDeviceToDevice)); // free allocated memories checkCudaErrors(cudaFree(d_incr)); checkCudaErrors(cudaFree(d_sums)); checkCudaErrors(cudaFree(d_scan)); } __global__ void histo_01(const unsigned int * const d_data, unsigned int * const d_histo, const unsigned int bit, const size_t len) { int px = blockDim.x * blockIdx.x + threadIdx.x; if (px >= len) return; const unsigned int bin = (d_data[px] >> bit) & 1U; atomicAdd(&(d_histo[bin]), 1); } __global__ void map_ones(const unsigned int * const d_data, unsigned int * const d_ones, const unsigned int bit, const size_t len) { const unsigned int px = blockDim.x * blockIdx.x + threadIdx.x; if (px >= len) return; d_ones[px] = (d_data[px] >> bit) & 1U; // =1 if d_data[px] at bit position is 1, =0 otherwise } __global__ void flip_01( unsigned int * const d_bits, const size_t len) { const unsigned int px = blockDim.x * blockIdx.x + threadIdx.x; if (px >= len) return; d_bits[px] ^= 1U; // toggle 0(1) to 1(0) } __global__ void permute(const unsigned int * const d_in, unsigned int * const d_out, const unsigned int * const d_zeros, const unsigned int * const d_scan0, const unsigned int * const d_scan1, const unsigned int * const d_h01, const size_t len) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= len) return; unsigned int pos = (d_zeros[idx]) ? 
d_scan0[idx] : d_scan1[idx] + d_h01[0]; d_out[pos] = d_in[idx]; } void bitonic_sort( unsigned int * const d_inputVals, unsigned int * const d_inputPos, unsigned int * const d_outputVals, unsigned int * const d_outputPos, const size_t numElems) { unsigned int nthreads = MAX_THREADS_PER_BLOCK; unsigned int nblocks = 1 + numElems/MAX_THREADS_PER_BLOCK; // allocate memories on gpu unsigned int * d_h01; // histo of 0's and 1's checkCudaErrors(cudaMalloc(&d_h01, sizeof(unsigned int)*2)); unsigned int * d_p01; // predicate of 0's or 1's checkCudaErrors(cudaMalloc(&d_p01, sizeof(unsigned int)*numElems)); unsigned int * d_scan0; // scan of d_p01 when d_p01 is flipped to represent 0's checkCudaErrors(cudaMalloc(&d_scan0, sizeof(unsigned int)*numElems)); unsigned int * d_scan1; // scan of d_p01 when d_p01 is flipeed to represent 1's checkCudaErrors(cudaMalloc(&d_scan1, sizeof(unsigned int)*numElems)); // ping pong (dummy) pointers unsigned int *d_ping1, *d_pong1, *d_ping2, *d_pong2; // loop from lowest bit to highest bit for (unsigned int bit = 0U; bit < sizeof(unsigned int) * CHAR_BIT ; bit++) { // ping pong input/output pointers (depending on bit is odd/even) d_ping1 = (bit & 1) ? d_outputVals : d_inputVals; d_pong1 = (bit & 1) ? d_inputVals : d_outputVals; d_ping2 = (bit & 1) ? d_outputPos : d_inputPos; d_pong2 = (bit & 1) ? d_inputPos : d_outputPos; // reset histo to zeros at each bin checkCudaErrors(cudaMemset(d_h01, 0U, sizeof(unsigned int)*2)); // perform histo at bit position histo_01<<<nblocks, nthreads>>>(d_ping1, d_h01, bit, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // map position of ones (at current bit position) to d_p01 map_ones<<<nblocks, nthreads>>>(d_ping1, d_p01, bit, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // scan predicate of ones psum(d_p01, d_scan1, numElems); // flip d_p01 to represent predicate of zeros flip_01<<<nblocks, nthreads>>>(d_p01, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // scan predicate of zeros psum(d_p01, d_scan0, numElems); // combine above results to get sorted position (wrt current bit position) permute<<<nblocks, nthreads>>>(d_ping1, d_pong1, d_p01, d_scan0, d_scan1, d_h01, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); permute<<<nblocks, nthreads>>>(d_ping2, d_pong2, d_p01, d_scan0, d_scan1, d_h01, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } // copy results to out{vals,pos} if numElems is even if (numElems & 1 == 0) { checkCudaErrors(cudaMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int)*numElems, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int)*numElems, cudaMemcpyDeviceToDevice)); } // gpu clean up checkCudaErrors(cudaFree(d_scan1)); checkCudaErrors(cudaFree(d_scan0)); checkCudaErrors(cudaFree(d_p01)); checkCudaErrors(cudaFree(d_h01)); } int main(int argc, char* argv[]) { size_t len = 10; // default len 10 if (argc >= 2) { std::istringstream ss(argv[1]); int tmp; if (ss >> tmp) len = tmp; } thrust::host_vector<unsigned int> h_val(len); thrust::host_vector<unsigned int> h_pos(len); // generate random uints to fill h_val thrust::generate(h_val.begin(), h_val.end(), rand); // set elements of h_pos to 0, 1, 2, ... 
thrust::sequence(h_pos.begin(), h_pos.end()); // make device vectors thrust::device_vector<unsigned int> d_inval(h_val); thrust::device_vector<unsigned int> d_outval(h_val); thrust::device_vector<unsigned int> d_inpos(h_pos); thrust::device_vector<unsigned int> d_outpos(h_pos); // corresponding device pointers thrust::device_ptr<unsigned int> dp_inval = d_inval.data(); thrust::device_ptr<unsigned int> dp_outval = d_outval.data(); thrust::device_ptr<unsigned int> dp_inpos = d_inpos.data(); thrust::device_ptr<unsigned int> dp_outpos = d_outpos.data(); bitonic_sort(thrust::raw_pointer_cast(dp_inval), thrust::raw_pointer_cast(dp_inpos), thrust::raw_pointer_cast(dp_outval), thrust::raw_pointer_cast(dp_outpos), len); // simple output int width = (int)log10(len) + 1; for (size_t i = 0; i < len; i++) std::cout << std::setw(width) << d_outpos[i] << ": " << d_outval[i] << "\n"; return 0; }
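// Note and sketch, not part of the file pair above. In bitonic_sort() (an LSD
// binary radix sort despite its name) the final copy-back is guarded by
// `numElems & 1 == 0`. Because `==` binds tighter than `&`, this parses as
// `numElems & (1 == 0)` and is always false, so the copy never runs even though
// the 32 ping-pong passes leave the sorted data in the input buffers. A
// hypothetical corrected tail, wrapped in a helper for illustration; the guard
// should depend on the pass count, not on numElems.
#include <climits>
#include <cuda_runtime.h>

static void copy_back_if_needed(unsigned int *d_outputVals, unsigned int *d_outputPos,
                                const unsigned int *d_inputVals,
                                const unsigned int *d_inputPos, size_t numElems)
{
    const unsigned int num_passes = sizeof(unsigned int) * CHAR_BIT;   // 32 radix passes
    if (num_passes % 2 == 0) {  // even pass count: result ended up in the input buffers
        cudaMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems,
                   cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_outputPos, d_inputPos, sizeof(unsigned int) * numElems,
                   cudaMemcpyDeviceToDevice);
    }
}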
bd6ea7b74aa235a67f41bcd1706e69dc4a507942.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <hip/hip_runtime.h> // Utilities and system includes #include <helper_functions.h> #include <helper_cuda.h> #include "convolutionSeparable_common.h" //////////////////////////////////////////////////////////////////////////////// // Reference CPU convolution //////////////////////////////////////////////////////////////////////////////// extern "C" void convolutionRowCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); extern "C" void convolutionColumnCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); int imageW,imageH; //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input, *d_Output, *d_Buffer; if(argc==2) { printf("Size = "); printf("%d",atoi(argv[1])); imageW = atoi(argv[1]); imageH = atoi(argv[1]); } else { imageW = 3072; imageH = 3072; } const int iterations = 16; StopWatchInterface *hTimer = NULL; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(hipMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); setConvolutionKernel(h_Kernel); checkCudaErrors(hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice)); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); } convolutionRowsGPU( d_Buffer, d_Input, imageW, imageH ); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH ); } checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double gpuTime = 0.001 * sdkGetTimerValue(&hTimer) / (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, 
Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", (1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(hipMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), hipMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(hipFree(d_Buffer)); checkCudaErrors(hipFree(d_Output)); checkCudaErrors(hipFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); sdkDeleteTimer(&hTimer); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
bd6ea7b74aa235a67f41bcd1706e69dc4a507942.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample implements a separable convolution filter * of a 2D image with an arbitrary kernel. */ // CUDA runtime #include <cuda_runtime.h> // Utilities and system includes #include <helper_functions.h> #include <helper_cuda.h> #include "convolutionSeparable_common.h" //////////////////////////////////////////////////////////////////////////////// // Reference CPU convolution //////////////////////////////////////////////////////////////////////////////// extern "C" void convolutionRowCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); extern "C" void convolutionColumnCPU( float *h_Result, float *h_Data, float *h_Kernel, int imageW, int imageH, int kernelR ); int imageW,imageH; //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // start logs printf("[%s] - Starting...\n", argv[0]); float *h_Kernel, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Input, *d_Output, *d_Buffer; if(argc==2) { printf("Size = "); printf("%d",atoi(argv[1])); imageW = atoi(argv[1]); imageH = atoi(argv[1]); } else { imageW = 3072; imageH = 3072; } const int iterations = 16; StopWatchInterface *hTimer = NULL; //Use command-line specified CUDA device, otherwise use device with highest Gflops/s findCudaDevice(argc, (const char **)argv); sdkCreateTimer(&hTimer); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); h_Kernel = (float *)malloc(KERNEL_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); srand(200); for (unsigned int i = 0; i < KERNEL_LENGTH; i++) { h_Kernel[i] = (float)(rand() % 16); } for (unsigned i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)(rand() % 16); } printf("Allocating and initializing CUDA arrays...\n"); checkCudaErrors(cudaMalloc((void **)&d_Input, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Output, imageW * imageH * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_Buffer , imageW * imageH * sizeof(float))); setConvolutionKernel(h_Kernel); checkCudaErrors(cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice)); printf("Running GPU convolution (%u identical iterations)...\n\n", iterations); for (int i = -1; i < iterations; i++) { //i == -1 -- warmup iteration if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); } convolutionRowsGPU( d_Buffer, d_Input, imageW, imageH ); convolutionColumnsGPU( d_Output, d_Buffer, imageW, imageH ); } checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double gpuTime = 0.001 * sdkGetTimerValue(&hTimer) / (double)iterations; printf("convolutionSeparable, Throughput = %.4f MPixels/sec, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %u\n", 
(1.0e-6 * (double)(imageW * imageH)/ gpuTime), gpuTime, (imageW * imageH), 1, 0); printf("\nReading back GPU results...\n\n"); checkCudaErrors(cudaMemcpy(h_OutputGPU, d_Output, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost)); printf("Checking the results...\n"); printf(" ...running convolutionRowCPU()\n"); convolutionRowCPU( h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...running convolutionColumnCPU()\n"); convolutionColumnCPU( h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS ); printf(" ...comparing the results\n"); double sum = 0, delta = 0; for (unsigned i = 0; i < imageW * imageH; i++) { delta += (h_OutputGPU[i] - h_OutputCPU[i]) * (h_OutputGPU[i] - h_OutputCPU[i]); sum += h_OutputCPU[i] * h_OutputCPU[i]; } double L2norm = sqrt(delta / sum); printf(" ...Relative L2 norm: %E\n\n", L2norm); printf("Shutting down...\n"); checkCudaErrors(cudaFree(d_Buffer)); checkCudaErrors(cudaFree(d_Output)); checkCudaErrors(cudaFree(d_Input)); free(h_OutputGPU); free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Kernel); sdkDeleteTimer(&hTimer); if (L2norm > 1e-6) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
4911b96c690d8c33b2e4bb6c8986e7ea79fa47e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tf_texture_1d.h" #include "helper_math.cuh" #include <cuMat/src/Context.h> #include "renderer_color.cuh" BEGIN_RENDERER_NAMESPACE __device__ float4 fetch(float density, const TfTexture1D::GpuData& gpuData) { float4 result{ 0.0f, 0.0f, 0.0f, 0.0f }; //This can be done using binary search but now simply do linear search. for (int i = 0; i < gpuData.sizeColor_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisColor_[i - 1]; auto rightDensity = i == gpuData.sizeColor_ ? 1.0f : gpuData.densityAxisColor_[i]; if (density < rightDensity) { auto leftColor = i == 0 ? gpuData.colorAxis_[0] : gpuData.colorAxis_[i - 1]; auto rightColor = i == gpuData.sizeColor_ ? gpuData.colorAxis_[gpuData.sizeColor_ - 1] : gpuData.colorAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); auto xyz = kernel::labToXyz(lerp(leftColor, rightColor, t)); result.x = xyz.x; result.y = xyz.y; result.z = xyz.z; break; } } for (int i = 0; i < gpuData.sizeOpacity_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisOpacity_[i - 1]; auto rightDensity = i == gpuData.sizeOpacity_ ? 1.0f : gpuData.densityAxisOpacity_[i]; if (density < rightDensity) { auto leftOpacity = i == 0 ? gpuData.opacityAxis_[0] : gpuData.opacityAxis_[i - 1]; auto rightOpacity = i == gpuData.sizeOpacity_ ? gpuData.opacityAxis_[gpuData.sizeOpacity_ - 1] : gpuData.opacityAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); result.w = lerp(leftOpacity, rightOpacity, t); break; } } return result; } __global__ void ComputeCudaTextureKernel( dim3 virtualSize, TfTexture1D::GpuData gpuData) { CUMAT_KERNEL_1D_LOOP(x, virtualSize) auto density = (x+0.5f) / static_cast<float>(virtualSize.x); auto xyzo = fetch(density, gpuData); auto rgb = kernel::xyzToRgb(make_float3(xyzo)); surf1Dwrite(xyzo.x, gpuData.surfaceObjectXYZ_, x * 16); surf1Dwrite(xyzo.y, gpuData.surfaceObjectXYZ_, x * 16 + 4); surf1Dwrite(xyzo.z, gpuData.surfaceObjectXYZ_, x * 16 + 8); surf1Dwrite(xyzo.w, gpuData.surfaceObjectXYZ_, x * 16 + 12); surf1Dwrite(rgb.x, gpuData.surfaceObjectRGB_, x * 16); surf1Dwrite(rgb.y, gpuData.surfaceObjectRGB_, x * 16 + 4); surf1Dwrite(rgb.z, gpuData.surfaceObjectRGB_, x * 16 + 8); surf1Dwrite(xyzo.w, gpuData.surfaceObjectRGB_, x * 16 + 12); CUMAT_KERNEL_1D_LOOP_END } MY_API void computeCudaTexture(const TfTexture1D::GpuData& gpuData) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(gpuData.cudaArraySize_, ComputeCudaTextureKernel); hipLaunchKernelGGL(( ComputeCudaTextureKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() , cfg.virtual_size, gpuData); CUMAT_CHECK_ERROR(); } END_RENDERER_NAMESPACE
4911b96c690d8c33b2e4bb6c8986e7ea79fa47e2.cu
#include "tf_texture_1d.h" #include "helper_math.cuh" #include <cuMat/src/Context.h> #include "renderer_color.cuh" BEGIN_RENDERER_NAMESPACE __device__ float4 fetch(float density, const TfTexture1D::GpuData& gpuData) { float4 result{ 0.0f, 0.0f, 0.0f, 0.0f }; //This can be done using binary search but now simply do linear search. for (int i = 0; i < gpuData.sizeColor_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisColor_[i - 1]; auto rightDensity = i == gpuData.sizeColor_ ? 1.0f : gpuData.densityAxisColor_[i]; if (density < rightDensity) { auto leftColor = i == 0 ? gpuData.colorAxis_[0] : gpuData.colorAxis_[i - 1]; auto rightColor = i == gpuData.sizeColor_ ? gpuData.colorAxis_[gpuData.sizeColor_ - 1] : gpuData.colorAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); auto xyz = kernel::labToXyz(lerp(leftColor, rightColor, t)); result.x = xyz.x; result.y = xyz.y; result.z = xyz.z; break; } } for (int i = 0; i < gpuData.sizeOpacity_ + 1; ++i) { auto leftDensity = i == 0 ? 0.0f : gpuData.densityAxisOpacity_[i - 1]; auto rightDensity = i == gpuData.sizeOpacity_ ? 1.0f : gpuData.densityAxisOpacity_[i]; if (density < rightDensity) { auto leftOpacity = i == 0 ? gpuData.opacityAxis_[0] : gpuData.opacityAxis_[i - 1]; auto rightOpacity = i == gpuData.sizeOpacity_ ? gpuData.opacityAxis_[gpuData.sizeOpacity_ - 1] : gpuData.opacityAxis_[i]; auto t = (density - leftDensity) / (rightDensity - leftDensity); result.w = lerp(leftOpacity, rightOpacity, t); break; } } return result; } __global__ void ComputeCudaTextureKernel( dim3 virtualSize, TfTexture1D::GpuData gpuData) { CUMAT_KERNEL_1D_LOOP(x, virtualSize) auto density = (x+0.5f) / static_cast<float>(virtualSize.x); auto xyzo = fetch(density, gpuData); auto rgb = kernel::xyzToRgb(make_float3(xyzo)); surf1Dwrite(xyzo.x, gpuData.surfaceObjectXYZ_, x * 16); surf1Dwrite(xyzo.y, gpuData.surfaceObjectXYZ_, x * 16 + 4); surf1Dwrite(xyzo.z, gpuData.surfaceObjectXYZ_, x * 16 + 8); surf1Dwrite(xyzo.w, gpuData.surfaceObjectXYZ_, x * 16 + 12); surf1Dwrite(rgb.x, gpuData.surfaceObjectRGB_, x * 16); surf1Dwrite(rgb.y, gpuData.surfaceObjectRGB_, x * 16 + 4); surf1Dwrite(rgb.z, gpuData.surfaceObjectRGB_, x * 16 + 8); surf1Dwrite(xyzo.w, gpuData.surfaceObjectRGB_, x * 16 + 12); CUMAT_KERNEL_1D_LOOP_END } MY_API void computeCudaTexture(const TfTexture1D::GpuData& gpuData) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(gpuData.cudaArraySize_, ComputeCudaTextureKernel); ComputeCudaTextureKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> (cfg.virtual_size, gpuData); CUMAT_CHECK_ERROR(); } END_RENDERER_NAMESPACE
3afb021c6c07d4ab1cb0d239b4567f4863be505e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by mustafa on 6/3/20. // #include <hiprand/hiprand_kernel.h> #include "../headers/MC_Kernels.cuh" #include "../headers/MC_RandomWalk.cuh" __global__ void MCKernels::simulate(unsigned int seed, hiprandState_t *states, MC_Photon *_gpuPhotons, MC_FiberGenerator const mcFiberGenerator, MC_MLTissue tissue, int const n) { int idx = (int) (blockIdx.x * blockDim.x + threadIdx.x); if (idx < n) { hiprand_init(seed, idx, 0, &states[idx]); MC_Photon finalState = RandomWalk(states, idx, mcFiberGenerator, tissue); _gpuPhotons[idx] = finalState; } }
3afb021c6c07d4ab1cb0d239b4567f4863be505e.cu
// // Created by mustafa on 6/3/20. // #include <curand_kernel.h> #include "../headers/MC_Kernels.cuh" #include "../headers/MC_RandomWalk.cuh" __global__ void MCKernels::simulate(unsigned int seed, curandState_t *states, MC_Photon *_gpuPhotons, MC_FiberGenerator const mcFiberGenerator, MC_MLTissue tissue, int const n) { int idx = (int) (blockIdx.x * blockDim.x + threadIdx.x); if (idx < n) { curand_init(seed, idx, 0, &states[idx]); MC_Photon finalState = RandomWalk(states, idx, mcFiberGenerator, tissue); _gpuPhotons[idx] = finalState; } }
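// Illustrative sketch, not part of the file pair above. simulate() seeds one
// curandState per thread with curand_init(seed, idx, 0, &state): the same seed,
// a per-thread sequence number, and a zero offset, which gives every photon walk
// an independent random stream. A minimal self-contained example of the same
// pattern (kernel and array names are assumptions):
#include <curand_kernel.h>

__global__ void draw_uniform(unsigned int seed, float *out, int n)
{
    int idx = (int)(blockIdx.x * blockDim.x + threadIdx.x);
    if (idx >= n) return;
    curandState state;
    curand_init(seed, idx, 0, &state);   // seed, sequence = thread index, offset = 0
    out[idx] = curand_uniform(&state);   // uniform draw in (0, 1]
}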
251575b5fdec41804491141ea6423c0748c2ec72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out) { const int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < size) { if (scale_wh) { int osd_index = index % channel_size + (index / batch_size)*channel_size; out[index] = in_w_h_c[index] * scales_c[osd_index]; } else { out[index] = in_w_h_c[index] * scales_c[index / channel_size]; } } }
251575b5fdec41804491141ea6423c0748c2ec72.cu
#include "includes.h" __global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out) { const int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < size) { if (scale_wh) { int osd_index = index % channel_size + (index / batch_size)*channel_size; out[index] = in_w_h_c[index] * scales_c[osd_index]; } else { out[index] = in_w_h_c[index] * scales_c[index / channel_size]; } } }
aa690a98414a9131b719cb116aefcafd295fa8a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) " "global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); }
aa690a98414a9131b719cb116aefcafd295fa8a8.cu
#include "includes.h" __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) " "global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); }
ee3c70f6ef8a54f7c8122009d2d533100d05b6f1.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdint> #include <cstdlib> #include <algorithm> #include "../utils/SyncedMemory.h" #include "../utils/Timer.h" #include "pgm.h" #include "lab3.h" using namespace std; #define CHECK {\ auto e = hipDeviceSynchronize();\ if (e != hipSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { Timer timer_count_position_t; timer_count_position_t.Start(); if (argc != 7) { printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]); abort(); } bool sucb, suct, sucm; int wb, hb, cb, wt, ht, ct, wm, hm, cm; auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]); auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]); auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]); if (not (sucb and suct and sucm)) { puts("Something wrong with reading the input image files."); abort(); } if (wt != wm or ht != hm) { puts("The mask and target image must have the same size."); abort(); } if (cm != 1) { puts("The mask image must be mono-colored."); abort(); } if (cb != 3 or ct != 3) { puts("The background and target image must be colored."); abort(); } const int oy = atoi(argv[4]), ox = atoi(argv[5]); const int SIZEB = wb*hb*3; const int SIZET = wt*ht*3; const int SIZEM = wm*hm; MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB); auto background_s = background.CreateSync(SIZEB); auto target_s = target.CreateSync(SIZET); auto mask_s = mask.CreateSync(SIZEM); auto output_s = output.CreateSync(SIZEB); float *background_cpu = background_s.get_cpu_wo(); float *target_cpu = target_s.get_cpu_wo(); float *mask_cpu = mask_s.get_cpu_wo(); copy(imgb.get(), imgb.get()+SIZEB, background_cpu); copy(imgt.get(), imgt.get()+SIZET, target_cpu); copy(imgm.get(), imgm.get()+SIZEM, mask_cpu); Timer timer_count_position; timer_count_position.Start(); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox ); timer_count_position.Pause(); printf_timer(timer_count_position); unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]); const float *o_cpu = output_s.get_cpu_ro(); transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); }); WritePPM(o.get(), wb, hb, argv[6]); timer_count_position_t.Pause(); printf_timer(timer_count_position_t); return 0; }
ee3c70f6ef8a54f7c8122009d2d533100d05b6f1.cu
#include <cstdio> #include <cstdint> #include <cstdlib> #include <algorithm> #include "../utils/SyncedMemory.h" #include "../utils/Timer.h" #include "pgm.h" #include "lab3.h" using namespace std; #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { Timer timer_count_position_t; timer_count_position_t.Start(); if (argc != 7) { printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]); abort(); } bool sucb, suct, sucm; int wb, hb, cb, wt, ht, ct, wm, hm, cm; auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]); auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]); auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]); if (not (sucb and suct and sucm)) { puts("Something wrong with reading the input image files."); abort(); } if (wt != wm or ht != hm) { puts("The mask and target image must have the same size."); abort(); } if (cm != 1) { puts("The mask image must be mono-colored."); abort(); } if (cb != 3 or ct != 3) { puts("The background and target image must be colored."); abort(); } const int oy = atoi(argv[4]), ox = atoi(argv[5]); const int SIZEB = wb*hb*3; const int SIZET = wt*ht*3; const int SIZEM = wm*hm; MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB); auto background_s = background.CreateSync(SIZEB); auto target_s = target.CreateSync(SIZET); auto mask_s = mask.CreateSync(SIZEM); auto output_s = output.CreateSync(SIZEB); float *background_cpu = background_s.get_cpu_wo(); float *target_cpu = target_s.get_cpu_wo(); float *mask_cpu = mask_s.get_cpu_wo(); copy(imgb.get(), imgb.get()+SIZEB, background_cpu); copy(imgt.get(), imgt.get()+SIZET, target_cpu); copy(imgm.get(), imgm.get()+SIZEM, mask_cpu); Timer timer_count_position; timer_count_position.Start(); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox ); timer_count_position.Pause(); printf_timer(timer_count_position); unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]); const float *o_cpu = output_s.get_cpu_ro(); transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); }); WritePPM(o.get(), wb, hb, argv[6]); timer_count_position_t.Pause(); printf_timer(timer_count_position_t); return 0; }
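// Illustrative sketch, not part of the file pair above. PoissonImageCloning() is
// only declared (via lab3.h) in this driver; a common way to realize it is Jacobi
// iteration on the Poisson equation over the masked region. The kernel below is
// an assumed sketch, not the file's actual implementation: `fixed` is taken to
// hold the target-gradient term (4*t minus the four target neighbors), `buf_in`
// the current estimate with background values filled in outside the mask, and the
// mask is assumed not to touch the target border.
__global__ void JacobiStep(const float *fixed, const float *buf_in, float *buf_out,
                           const float *mask, int wt, int ht)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= wt || y >= ht) return;
    int c = y * wt + x;
    if (mask[c] <= 127.0f) return;                    // only relax pixels inside the mask

    for (int ch = 0; ch < 3; ch++) {                  // three color channels
        float sum = fixed[c * 3 + ch];
        sum += buf_in[(c - wt) * 3 + ch];             // north
        sum += buf_in[(c + wt) * 3 + ch];             // south
        sum += buf_in[(c - 1)  * 3 + ch];             // west
        sum += buf_in[(c + 1)  * 3 + ch];             // east
        buf_out[c * 3 + ch] = sum * 0.25f;            // u_new = (b + neighbor sum) / 4
    }
}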
11283c6c62f0da7fadc0f966563bea36ffc8b266.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaSplitEncseg.h" #include "CudaSplitEncsubface.h" #include "CudaInsertPoint.h" #include "CudaMesh.h" /* Host */ // This function assumes the input encmarker has be set correctly // in the initialization void initSubfaceEncmarkers( RealD& t_pointlist, IntD& t_trifacelist, TetHandleD& t_tri2tetlist, IntD& t_subfaceencmarker, IntD& t_tetlist, int& numofsubface ) { int numberofblocks = (ceil)((float)numofsubface / BLOCK_SIZE); kernelMarkAllEncsubfaces << <numberofblocks, BLOCK_SIZE >> > ( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_trifacelist[0]), thrust::raw_pointer_cast(&t_tri2tetlist[0]), thrust::raw_pointer_cast(&t_subfaceencmarker[0]), thrust::raw_pointer_cast(&t_tetlist[0]), numofsubface ); } // This function splits the encroached subfaces iteratively void splitEncsubfaces( RealD& t_pointlist, TriHandleD& t_point2trilist, TetHandleD& t_point2tetlist, PointTypeD& t_pointtypelist, RealD& t_pointradius, IntD& t_seglist, TriHandleD& t_seg2trilist, TetHandleD& t_seg2tetlist, IntD& t_seg2parentidxlist, IntD& t_segparentendpointidxlist, TriStatusD& t_segstatus, IntD& t_trifacelist, TetHandleD& t_tri2tetlist, TriHandleD& t_tri2trilist, TriHandleD& t_tri2seglist, IntD& t_tri2parentidxlist, IntD& t_triid2parentoffsetlist, IntD& t_triparentendpointidxlist, TriStatusD& t_tristatus, IntD& t_tetlist, TetHandleD& t_neighborlist, TriHandleD& t_tet2trilist, TriHandleD& t_tet2seglist, TetStatusD& t_tetstatus, IntD& t_segencmarker, IntD& t_subfaceencmarker, int& numofpoints, int& numofsubseg, int& numofsubface, int& numoftet, MESHBH* behavior, int iter_tet, int debug_msg, bool debug_error, bool debug_timing ) { int numberofencsubfaces; // number of encroached subfaces IntD t_encsubfacelist; IntD t_threadmarker; int code = 1; int iteration = 0; while (true) { // Update the active encroached subface list. // Exclude the empty ones (their markers have already been set to -1). 
numberofencsubfaces = updateActiveListByMarker_Slot(t_subfaceencmarker, t_encsubfacelist, numofsubface); if(debug_msg) printf(" Iteration #%d: number of encroached subfaces = %d\n", iteration, numberofencsubfaces); if (numberofencsubfaces == 0) break; t_threadmarker.resize(numberofencsubfaces); thrust::fill(t_threadmarker.begin(), t_threadmarker.end(), 1); code = insertPoint( t_pointlist, t_point2trilist, t_point2tetlist, t_pointtypelist, t_pointradius, t_seglist, t_seg2trilist, t_seg2tetlist, t_seg2parentidxlist, t_segparentendpointidxlist, t_segstatus, t_trifacelist, t_tri2tetlist, t_tri2trilist, t_tri2seglist, t_tri2parentidxlist, t_triid2parentoffsetlist, t_triparentendpointidxlist, t_tristatus, t_tetlist, t_neighborlist, t_tet2trilist, t_tet2seglist, t_tetstatus, t_segencmarker, t_subfaceencmarker, t_encsubfacelist, t_threadmarker, numberofencsubfaces, 0, numberofencsubfaces, 0, // split subface numofpoints, numofsubseg, numofsubface, numoftet, behavior, -1, iteration, iter_tet, debug_msg, debug_error, debug_timing ); if (!code) break; splitEncsegs( t_pointlist, t_point2trilist, t_point2tetlist, t_pointtypelist, t_pointradius, t_seglist, t_seg2trilist, t_seg2tetlist, t_seg2parentidxlist, t_segparentendpointidxlist, t_segstatus, t_trifacelist, t_tri2tetlist, t_tri2trilist, t_tri2seglist, t_tri2parentidxlist, t_triid2parentoffsetlist, t_triparentendpointidxlist, t_tristatus, t_tetlist, t_neighborlist, t_tet2trilist, t_tet2seglist, t_tetstatus, t_segencmarker, t_subfaceencmarker, numofpoints, numofsubseg, numofsubface, numoftet, behavior, iteration, iter_tet, 0, debug_error, false ); hipDeviceSynchronize(); iteration++; } if (!code && debug_msg) printf(" Ended with %d bad subface\n", numberofencsubfaces); }
11283c6c62f0da7fadc0f966563bea36ffc8b266.cu
#include "CudaSplitEncseg.h" #include "CudaSplitEncsubface.h" #include "CudaInsertPoint.h" #include "CudaMesh.h" /* Host */ // This function assumes the input encmarker has be set correctly // in the initialization void initSubfaceEncmarkers( RealD& t_pointlist, IntD& t_trifacelist, TetHandleD& t_tri2tetlist, IntD& t_subfaceencmarker, IntD& t_tetlist, int& numofsubface ) { int numberofblocks = (ceil)((float)numofsubface / BLOCK_SIZE); kernelMarkAllEncsubfaces << <numberofblocks, BLOCK_SIZE >> > ( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_trifacelist[0]), thrust::raw_pointer_cast(&t_tri2tetlist[0]), thrust::raw_pointer_cast(&t_subfaceencmarker[0]), thrust::raw_pointer_cast(&t_tetlist[0]), numofsubface ); } // This function splits the encroached subfaces iteratively void splitEncsubfaces( RealD& t_pointlist, TriHandleD& t_point2trilist, TetHandleD& t_point2tetlist, PointTypeD& t_pointtypelist, RealD& t_pointradius, IntD& t_seglist, TriHandleD& t_seg2trilist, TetHandleD& t_seg2tetlist, IntD& t_seg2parentidxlist, IntD& t_segparentendpointidxlist, TriStatusD& t_segstatus, IntD& t_trifacelist, TetHandleD& t_tri2tetlist, TriHandleD& t_tri2trilist, TriHandleD& t_tri2seglist, IntD& t_tri2parentidxlist, IntD& t_triid2parentoffsetlist, IntD& t_triparentendpointidxlist, TriStatusD& t_tristatus, IntD& t_tetlist, TetHandleD& t_neighborlist, TriHandleD& t_tet2trilist, TriHandleD& t_tet2seglist, TetStatusD& t_tetstatus, IntD& t_segencmarker, IntD& t_subfaceencmarker, int& numofpoints, int& numofsubseg, int& numofsubface, int& numoftet, MESHBH* behavior, int iter_tet, int debug_msg, bool debug_error, bool debug_timing ) { int numberofencsubfaces; // number of encroached subfaces IntD t_encsubfacelist; IntD t_threadmarker; int code = 1; int iteration = 0; while (true) { // Update the active encroached subface list. // Exclude the empty ones (their markers have already been set to -1). 
numberofencsubfaces = updateActiveListByMarker_Slot(t_subfaceencmarker, t_encsubfacelist, numofsubface); if(debug_msg) printf(" Iteration #%d: number of encroached subfaces = %d\n", iteration, numberofencsubfaces); if (numberofencsubfaces == 0) break; t_threadmarker.resize(numberofencsubfaces); thrust::fill(t_threadmarker.begin(), t_threadmarker.end(), 1); code = insertPoint( t_pointlist, t_point2trilist, t_point2tetlist, t_pointtypelist, t_pointradius, t_seglist, t_seg2trilist, t_seg2tetlist, t_seg2parentidxlist, t_segparentendpointidxlist, t_segstatus, t_trifacelist, t_tri2tetlist, t_tri2trilist, t_tri2seglist, t_tri2parentidxlist, t_triid2parentoffsetlist, t_triparentendpointidxlist, t_tristatus, t_tetlist, t_neighborlist, t_tet2trilist, t_tet2seglist, t_tetstatus, t_segencmarker, t_subfaceencmarker, t_encsubfacelist, t_threadmarker, numberofencsubfaces, 0, numberofencsubfaces, 0, // split subface numofpoints, numofsubseg, numofsubface, numoftet, behavior, -1, iteration, iter_tet, debug_msg, debug_error, debug_timing ); if (!code) break; splitEncsegs( t_pointlist, t_point2trilist, t_point2tetlist, t_pointtypelist, t_pointradius, t_seglist, t_seg2trilist, t_seg2tetlist, t_seg2parentidxlist, t_segparentendpointidxlist, t_segstatus, t_trifacelist, t_tri2tetlist, t_tri2trilist, t_tri2seglist, t_tri2parentidxlist, t_triid2parentoffsetlist, t_triparentendpointidxlist, t_tristatus, t_tetlist, t_neighborlist, t_tet2trilist, t_tet2seglist, t_tetstatus, t_segencmarker, t_subfaceencmarker, numofpoints, numofsubseg, numofsubface, numoftet, behavior, iteration, iter_tet, 0, debug_error, false ); cudaDeviceSynchronize(); iteration++; } if (!code && debug_msg) printf(" Ended with %d bad subface\n", numberofencsubfaces); }
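// Illustrative sketch, not part of the file pair above. updateActiveListByMarker_Slot()
// is called in splitEncsubfaces but defined elsewhere in the project. One plausible
// way to compact the indices of still-encroached subfaces (marker != -1, per the
// comment in the loop) is a Thrust stream compaction; the helper name, signature
// and marker convention here are assumptions for illustration only.
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>

struct IsEncroached
{
    const int *marker;
    __host__ __device__ bool operator()(int idx) const { return marker[idx] != -1; }
};

static int compactEncroachedSubfaces(const thrust::device_vector<int> &subfaceencmarker,
                                     thrust::device_vector<int> &encsubfacelist,
                                     int numofsubface)
{
    encsubfacelist.resize(numofsubface);
    IsEncroached pred{ thrust::raw_pointer_cast(subfaceencmarker.data()) };
    auto last = thrust::copy_if(thrust::counting_iterator<int>(0),
                                thrust::counting_iterator<int>(numofsubface),
                                encsubfacelist.begin(), pred);
    int count = (int)(last - encsubfacelist.begin());
    encsubfacelist.resize(count);
    return count;
}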
9dcb2da06d3ba702571359510c99959a8376ce6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal z -> c */ #include "common_magma.h" #define PRECISION_z /*The version for tesla can be found in zhemv_tesla.cu */ #if (GPUSHMEM >= 200) #define magmablas_zhemv_200 magmablas_zhemv #define magmablas_zhemv2_200 magmablas_zhemv2 #define NB_64 /* turning on NB_64, it will call routine blocksize = 64 otherwise it will can blocksize = 32 which is 10% faster in z,c precision */ #ifdef NB_64// using block size 64 #define zhemv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 #else // using block size 32 #define zhemv_bs 32 #define thread_x 32 #define thread_y 8 #define bank_shift 33 #define SWITCH 1400 #endif /******************************************************************************* * Functions for each specific cases - Lower case */ #ifdef NB_64 __global__ void magmablas_zhemv_200_L_special( magma_int_t n, hipDoubleComplex alpha, const hipDoubleComplex *A, magma_int_t lda, const hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; hipDoubleComplex res = MAGMA_Z_ZERO; hipDoubleComplex res_ = MAGMA_Z_ZERO; hipDoubleComplex res1 = MAGMA_Z_ZERO; __shared__ hipDoubleComplex la [quarter_thread_x][thread_x+2]; __shared__ hipDoubleComplex buff [thread_x]; __shared__ hipDoubleComplex buff2 [thread_x]; hipDoubleComplex tr[4]; hipDoubleComplex b[4]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += ty_* lda + tx_ ; if( ty == 0 ){ buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_ ; ty = ty_ ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j +=8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_ * 4 + 4) ; i++){ if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = cuConj( la[0][ i * bank_shift + tx_] ) ; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= cuConj( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res1,0); } __syncthreads(); MAGMA_Z_SET2REAL(res, 0) ; A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = cuConj( la[0][bank_shift*i+tx_] ) ; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= cuConj( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); hipDoubleComplex res2; 
MAGMA_Z_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); MAGMA_Z_SET2REAL(res,0); A-=half_thread_x *lda ; MAGMA_Z_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= cuConj(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { MAGMA_Z_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_Z_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda * blkc * thread_x; x= x - blkc * thread_x *incx ; A+=4 * ty* lda ; A+=tx; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; if( blkc * thread_x >=thread_x) #pragma unroll for(magma_int_t i=0; i<thread_x; i += thread_x ) { MAGMA_Z_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++) { #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j]; la[( j + ty * 4)][tx] = cuConj(tr[j]) * buff[tx]; } __syncthreads(); MAGMA_Z_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res_+=la[tx_][ty_*4+j] ; } b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(magma_int_t i=thread_x; i< (blkc * thread_x); i += thread_x ) { MAGMA_Z_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++) { #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += tr[j] * buff2[quarter_thread_x*k + ty*4+(j)]; la[( j + ty * 4)][tx] = cuConj( tr[j] )* buff[tx]; } __syncthreads(); MAGMA_Z_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j] ; b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll 
for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_zhemv_200_L_generic(magma_int_t n, hipDoubleComplex alpha, const hipDoubleComplex *A, magma_int_t lda, const hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t m_mod_thread_x) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; hipDoubleComplex res = MAGMA_Z_ZERO; hipDoubleComplex res_ = MAGMA_Z_ZERO; hipDoubleComplex res1 = MAGMA_Z_ZERO; __shared__ hipDoubleComplex la [quarter_thread_x][thread_x+2]; __shared__ hipDoubleComplex buff [thread_x]; __shared__ hipDoubleComplex buff2[thread_x]; hipDoubleComplex tr[4]; hipDoubleComplex b[8]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC+= break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += lda * ty_; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_Z_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx_; A += trackA ; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-=trackA; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_ ; ty = ty_ ; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_*4+4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = cuConj(la[0][i*bank_shift+tx_]) ; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[0][bank_shift*tx_+j+ty_*4])* buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res1,0); } __syncthreads(); MAGMA_Z_SET2REAL(res,0); if( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A+= trackA+half_thread_x*lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999); } else 
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-= trackA+half_thread_x*lda ; A+=tx_ ; A+= half_thread_x + half_thread_x *lda ; } else { A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = cuConj(la[0][bank_shift*i+tx_]) ; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= cuConj(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); hipDoubleComplex res2; MAGMA_Z_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); MAGMA_Z_SET2REAL(res,0); MAGMA_Z_SET2REAL(res_,0); A-=half_thread_x *lda ; if( blkc == ( gridDim.x - 1 ) ) { A-=tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A+= trackA ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(tr[j/8], 99999); } else tr[j/8] = A[ j * lda]; A-=trackA; A+=tx_; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+= tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= cuConj(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { MAGMA_Z_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_Z_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda*break_d; x= x - break_d *incx ; A+=4 * ty* lda ; if( blkc == ( gridDim.x - 1 ) ) { if(tx <= m_mod_thread_x ) A+=tx; else A+=m_mod_thread_x; } else{ A+=tx; } magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) b[j] = buff[ty_*4+j]; if( break_d > 0) #pragma unroll for(magma_int_t i=0; i< thread_x; i += thread_x ){ MAGMA_Z_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++){ #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = cuConj(tr[j]); } __syncthreads(); MAGMA_Z_SET2REAL(res_, 0) ; #pragma unroll 
for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(magma_int_t i=thread_x; i<break_d; i += thread_x ){ MAGMA_Z_SET2REAL(res_, 0) ; count++; if(ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++){ #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = cuConj(tr[j]); } __syncthreads(); MAGMA_Z_SET2REAL(res_, 0) ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx] ; WC[0+lda*(blkc)] = res; } } __global__ void magmablas_zhemv_200_L_update(magma_int_t n, hipDoubleComplex alpha, const hipDoubleComplex* A, magma_int_t lda, const hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * thread_x + tx ; hipDoubleComplex Ca; MAGMA_Z_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i<n; i+=thread_x){ Ca += WC[0] ; WC += thread_x; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_zhemv_200_L(magma_int_t m, hipDoubleComplex alpha, const hipDoubleComplex *A, magma_int_t lda, const hipDoubleComplex *X, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *Y, magma_int_t incy, hipDoubleComplex *dC_work) { magma_int_t blocks; if (m % zhemv_bs==0) blocks = m / zhemv_bs; else blocks = m / zhemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(zhemv_bs, 1, 1); /* * If matrix size is multiple of zhemv_bs, we use a specific code. * otherwise, we call the generic case. 
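 * (Added note: under NB_64 above, zhemv_bs = 64, so e.g. m = 1024 is a multiple
 * of 64 and takes the special kernel, while m = 1000 takes the generic kernel
 * with m_mod_thread_x = 1000 % 64 - 1 = 39.)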
*/ if(m % zhemv_bs == 0 ) { hipLaunchKernelGGL(( magmablas_zhemv_200_L_special) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } else{ magma_int_t m_mod_thread_x = m%zhemv_bs - 1; hipLaunchKernelGGL(( magmablas_zhemv_200_L_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x); } hipLaunchKernelGGL(( magmablas_zhemv_200_L_update), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } #else /******************************************************************************* * Functions for each specific cases - Lower case nb = 32 */ __global__ void magmablas_zhemv_200_L_special_32_s( magma_int_t n, hipDoubleComplex alpha, hipDoubleComplex *A, magma_int_t lda, hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t nb) { if(blockIdx.y > blockIdx.x) return; magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; hipDoubleComplex res = MAGMA_Z_ZERO;// used in scan the row hipDoubleComplex res_ = MAGMA_Z_ZERO;// used in scan the column __shared__ hipDoubleComplex la [1056]; __shared__ hipDoubleComplex buff [zhemv_bs]; __shared__ hipDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blockIdx.x; A += break_d ; A += lda * ty + tx; A += lda * (blockIdx.y ) * zhemv_bs; // x += tx; if ( blockIdx.x == blockIdx.y ) // diagonal { x += (blockIdx.y * zhemv_bs) * incx; if( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++) { if ( i < tx ) { la[bank_shift * tx + i] = cuConj(la[ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; __syncthreads(); } else // non diagonal { x += (blockIdx.x * zhemv_bs) * incx; if( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x and store in buff; buff store its corresponding upper elements instead of buff2; x -= (blockIdx.x * zhemv_bs ) * incx; x += (blockIdx.y * zhemv_bs ) * incx; if( ty == 0 ) { buff2[tx] = x[0]; } // obtain the vector x store in buff2; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) { la[ bank_shift * (ty+j) + tx] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; // } __syncthreads(); la[bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.y * zhemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position } __syncthreads(); } // end if else la[bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.x * zhemv_bs + lda * blockIdx.y] = res; } } __global__ void magmablas_zhemv_200_L_special_32( magma_int_t n, hipDoubleComplex alpha, hipDoubleComplex *A, magma_int_t lda, hipDoubleComplex *x, magma_int_t incx, 
hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t nb) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; hipDoubleComplex res = MAGMA_Z_ZERO;// used in scan the row hipDoubleComplex res_ = MAGMA_Z_ZERO;// used in scan the column hipDoubleComplex res1 = MAGMA_Z_ZERO;// tem for res hipDoubleComplex res2 = MAGMA_Z_ZERO;// tem for res_ __shared__ hipDoubleComplex la [16][64+2]; __shared__ hipDoubleComplex buff [zhemv_bs]; __shared__ hipDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += ty * lda + tx ; if( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; { A += lda * (blkc) * zhemv_bs; // change #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){ if ( i < tx ) { la[0][bank_shift * tx + i] = cuConj( la[0][ i * bank_shift + tx] ) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; __syncthreads(); A -= lda * (blkc) * zhemv_bs; } x -= blkc * zhemv_bs *incx ; x= x- tx*incx; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; WC += break_d + tx; if( blkc > 0) for(magma_int_t s=0; s< (blkc * zhemv_bs); s += zhemv_bs ) { MAGMA_Z_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += 1; x += zhemv_bs; A += lda * zhemv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_zhemv_200_L_generic_32_s( magma_int_t n, hipDoubleComplex alpha, hipDoubleComplex *A, magma_int_t lda, hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t m_mod_thread_x, magma_int_t nb) { if(blockIdx.y > blockIdx.x) return; magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; hipDoubleComplex res = MAGMA_Z_ZERO;// used in scan the row hipDoubleComplex res_ = MAGMA_Z_ZERO;// used in scan the column __shared__ hipDoubleComplex la [1056]; __shared__ hipDoubleComplex buff [zhemv_bs]; __shared__ hipDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blockIdx.x; A += break_d ; A += lda * ty; A += lda * (blockIdx.y ) * zhemv_bs; // x += tx; x += (blockIdx.x * zhemv_bs) * incx; magma_int_t trackA ; if( blockIdx.x == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { 
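// added note (not in the original): in the last block row, threads whose index
// exceeds m_mod_thread_x lie past the end of the matrix, so the missing x entry
// is replaced by zero here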
MAGMA_Z_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx; A += trackA ; } __syncthreads(); if ( blockIdx.x == blockIdx.y) // diagonal { if( blockIdx.x == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ if( ( ty + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[bank_shift*(ty+j)+tx], 9999); } else la[bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ la[bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++) { if ( i < tx ) { la[bank_shift * tx + i] = cuConj(la[ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; __syncthreads(); } else // non diagonal { // obtain the vector x and store in buff; buff store its corresponding upper elements instead of buff2; x -= (blockIdx.x * zhemv_bs ) * incx; x += (blockIdx.y * zhemv_bs ) * incx; if( ty == 0 ) { buff2[tx] = x[0]; } // obtain the vector x store in buff2; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) { la[ bank_shift * (ty+j) + tx] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; // } __syncthreads(); la[bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.y * zhemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position } __syncthreads(); } // end if else la[bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.x * zhemv_bs + lda * blockIdx.y] = res; } } __global__ void magmablas_zhemv_200_L_generic_32(magma_int_t n, hipDoubleComplex alpha, hipDoubleComplex *A, magma_int_t lda, hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t m_mod_thread_x, magma_int_t nb) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; hipDoubleComplex res = MAGMA_Z_ZERO; hipDoubleComplex res_ = MAGMA_Z_ZERO; hipDoubleComplex res1 = MAGMA_Z_ZERO; hipDoubleComplex res2 = MAGMA_Z_ZERO; __shared__ hipDoubleComplex la [16][64+2]; __shared__ hipDoubleComplex buff [zhemv_bs]; __shared__ hipDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += lda * ty; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_Z_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx; A += trackA ; } { A += lda * (blkc) * zhemv_bs; // change // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ 
if( ( ty + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){ if ( i < tx ) { la[0][bank_shift*tx+i] = cuConj(la[0][i*bank_shift+tx]) ; } else la[0][bank_shift*tx+i] = la[0][bank_shift*tx+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc) * zhemv_bs; } __syncthreads(); x= x - break_d *incx ; x= x - tx * incx ; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; WC += break_d + tx; if( blkc > 0) for(magma_int_t s=0; s< (blkc * zhemv_bs); s += zhemv_bs ) { MAGMA_Z_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += 1; x += zhemv_bs; A += lda * zhemv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } __global__ void magmablas_zhemv_200_L_update_32_s(magma_int_t n, hipDoubleComplex alpha, hipDoubleComplex* A, magma_int_t lda, hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t nb ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * zhemv_bs + tx ; hipDoubleComplex Ca; MAGMA_Z_SET2REAL(Ca, 0) ; WC+= ind; for(i =0; i<n; i+=zhemv_bs){ Ca += WC[i/zhemv_bs * lda] ; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } __global__ void magmablas_zhemv_200_L_update_32(magma_int_t n, hipDoubleComplex alpha, hipDoubleComplex* A, magma_int_t lda, hipDoubleComplex *x, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *y, magma_int_t incy, hipDoubleComplex *WC, magma_int_t nb ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * zhemv_bs + tx ; hipDoubleComplex Ca; MAGMA_Z_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*zhemv_bs; i<n; i+=zhemv_bs){ Ca += WC[0] ; WC += zhemv_bs; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_zhemv_200_L_32(magma_int_t m, hipDoubleComplex alpha, hipDoubleComplex *A, magma_int_t lda, hipDoubleComplex *X, magma_int_t incx, hipDoubleComplex beta, hipDoubleComplex *Y, magma_int_t incy, hipDoubleComplex *dC_work, magma_int_t nb) { magma_int_t blocks; if (m % zhemv_bs==0) blocks = m / zhemv_bs; else blocks = m / zhemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 grid_s(blocks, blocks, 1); dim3 threads(thread_x, thread_y, 1); dim3 
threads_u(zhemv_bs, 1, 1); /* * If matrix size is multiple of zhemv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % zhemv_bs == 0 ) { if(m < SWITCH) hipLaunchKernelGGL(( magmablas_zhemv_200_L_special_32_s) , dim3(grid_s), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); else hipLaunchKernelGGL(( magmablas_zhemv_200_L_special_32) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); } else{ magma_int_t m_mod_thread_x = m%zhemv_bs - 1; if(m < SWITCH) hipLaunchKernelGGL(( magmablas_zhemv_200_L_generic_32_s) , dim3(grid_s), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb); else hipLaunchKernelGGL(( magmablas_zhemv_200_L_generic_32) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb); } if(m < SWITCH) hipLaunchKernelGGL(( magmablas_zhemv_200_L_update_32_s), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); else hipLaunchKernelGGL(( magmablas_zhemv_200_L_update_32), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); } #endif /************************************************************************* Purpose ======= magmablas_zhemv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - COMPLEX*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - COMPLEX*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). 
             Before entry, the incremented array Y must contain the n
             element vector y. On exit, Y is overwritten by the updated
             vector y.

    INCY   - INTEGER.
             On entry, INCY specifies the increment for the elements of Y.
             INCY must not be zero.
             Unchanged on exit.

*/

extern "C" magma_int_t
magmablas_zhemv_200( char uplo, magma_int_t n,
                     hipDoubleComplex alpha,
                     const hipDoubleComplex *A, magma_int_t lda,
                     const hipDoubleComplex *X, magma_int_t incx,
                     hipDoubleComplex beta,
                     hipDoubleComplex *Y, magma_int_t incy)
{
    char uplo_[2] = {uplo, 0};
    int  upper    = lapackf77_lsame(uplo_, "U");

    /*
     * Test the input parameters.
     */
    if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
        return -1;
    } else if ( n < 0 ) {
        return -2;
    } else if ( lda < max(1,n) ) {
        return -5;
    } else if ( incx == 0 ) {
        return -7;
    } else if ( incy == 0 ) {
        return -10;
    }

    /*
     * Quick return if possible.
     */
    if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
        return MAGMA_SUCCESS;

    /* TODO: Upper case is not implemented in MAGMA */
    if ( upper )
        hipblasZhemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
    else {
        hipDoubleComplex *dC_work;
        magma_int_t blocks    = n / zhemv_bs + (n % zhemv_bs != 0);
        magma_int_t workspace = lda * (blocks + 1);

        /* TODO: need to add a MAGMA context to handle workspaces */
        hipblasAlloc( workspace, sizeof(hipDoubleComplex), (void**)&dC_work ) ;
        hipblasGetError( ) ;
#ifdef NB_64
        magmablas_zhemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
#else
        magmablas_zhemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work, zhemv_bs);
#endif
        hipblasFree(dC_work);
        hipblasGetError( ) ;
    }
    return MAGMA_SUCCESS;
}

extern "C" magma_int_t
magmablas_zhemv2_200( char uplo, magma_int_t n,
                      hipDoubleComplex alpha,
                      const hipDoubleComplex *A, magma_int_t lda,
                      const hipDoubleComplex *X, magma_int_t incx,
                      hipDoubleComplex beta,
                      hipDoubleComplex *Y, magma_int_t incy,
                      hipDoubleComplex *work, int lwork)
{
    char uplo_[2] = {uplo, 0};
    int  upper    = lapackf77_lsame(uplo_, "U");

    /*
     * Test the input parameters.
     */
    if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
        return -1;
    } else if ( n < 0 ) {
        return -2;
    } else if ( lda < max(1,n) ) {
        return -5;
    } else if ( incx == 0 ) {
        return -7;
    } else if ( incy == 0 ) {
        return -10;
    }

    /*
     * Quick return if possible.
     */
    if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
        return MAGMA_SUCCESS;

    /* TODO: Upper case is not implemented in MAGMA */
    if ( upper )
        hipblasZhemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
    else {
        magma_int_t blocks    = n / zhemv_bs + (n % zhemv_bs != 0);
        magma_int_t workspace = lda * (blocks + 1);

        if (lwork < workspace){
            printf("Not enough work space in magmablas_zhemv: passed %d, required %d\n",
                   lwork, workspace);
            exit(1);
        }
        //printf("You are using zhemv_bs=%d\n", zhemv_bs);

#ifdef NB_64
        if( n < 1622)
            hipblasZhemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
        else
            magmablas_zhemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, work);
#else
        magmablas_zhemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, work, zhemv_bs);
#endif
    }
    return MAGMA_SUCCESS;
}
#endif /* (GPUSHMEM >= 200) */
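The two wrappers above are the public entry points of this hipified file (the #define near the top maps magmablas_zhemv_200 to magmablas_zhemv). As a hedged illustration only, the sketch below shows one plausible host-side call of the lower-triangular path. It assumes the MAGMA 1.x header included at the top of this file ("common_magma.h") declares magmablas_zhemv with the signature defined above and provides the MAGMA_Z_* scalar macros; the helper name example_zhemv_lower is invented for this sketch, device data filling and error checking are omitted.

// Hypothetical usage sketch (not part of the original source): computes
// y := alpha*A*x + beta*y on the lower triangle via the wrapper defined above.
#include <hip/hip_runtime.h>
#include "common_magma.h"   // assumed to declare magmablas_zhemv and the MAGMA_Z_* macros

void example_zhemv_lower(magma_int_t n, magma_int_t lda)
{
    hipDoubleComplex *dA, *dX, *dY;

    // A is n-by-n with leading dimension lda (ideally a multiple of 16, see the
    // LDA note in the documentation block above); x and y are n-vectors.
    hipMalloc((void**)&dA, (size_t)lda * n * sizeof(hipDoubleComplex));
    hipMalloc((void**)&dX, (size_t)n * sizeof(hipDoubleComplex));
    hipMalloc((void**)&dY, (size_t)n * sizeof(hipDoubleComplex));

    // ... fill dA (lower triangle of a Hermitian matrix), dX and dY here ...

    hipDoubleComplex alpha = MAGMA_Z_ONE;   // the same scalar macros this file already uses
    hipDoubleComplex beta  = MAGMA_Z_ZERO;

    // Only uplo = 'L' runs the kernels in this file; 'U' falls back to hipblasZhemv.
    magmablas_zhemv('L', n, alpha, dA, lda, dX, 1, beta, dY, 1);

    hipFree(dA);
    hipFree(dX);
    hipFree(dY);
}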
9dcb2da06d3ba702571359510c99959a8376ce6f.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @precisions normal z -> c */ #include "common_magma.h" #define PRECISION_z /*The version for tesla can be found in zhemv_tesla.cu */ #if (GPUSHMEM >= 200) #define magmablas_zhemv_200 magmablas_zhemv #define magmablas_zhemv2_200 magmablas_zhemv2 #define NB_64 /* turning on NB_64, it will call routine blocksize = 64 otherwise it will can blocksize = 32 which is 10% faster in z,c precision */ #ifdef NB_64// using block size 64 #define zhemv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 #else // using block size 32 #define zhemv_bs 32 #define thread_x 32 #define thread_y 8 #define bank_shift 33 #define SWITCH 1400 #endif /******************************************************************************* * Functions for each specific cases - Lower case */ #ifdef NB_64 __global__ void magmablas_zhemv_200_L_special( magma_int_t n, cuDoubleComplex alpha, const cuDoubleComplex *A, magma_int_t lda, const cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; cuDoubleComplex res = MAGMA_Z_ZERO; cuDoubleComplex res_ = MAGMA_Z_ZERO; cuDoubleComplex res1 = MAGMA_Z_ZERO; __shared__ cuDoubleComplex la [quarter_thread_x][thread_x+2]; __shared__ cuDoubleComplex buff [thread_x]; __shared__ cuDoubleComplex buff2 [thread_x]; cuDoubleComplex tr[4]; cuDoubleComplex b[4]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += ty_* lda + tx_ ; if( ty == 0 ){ buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_ ; ty = ty_ ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j +=8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_ * 4 + 4) ; i++){ if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = cuConj( la[0][ i * bank_shift + tx_] ) ; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= cuConj( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res1,0); } __syncthreads(); MAGMA_Z_SET2REAL(res, 0) ; A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = cuConj( la[0][bank_shift*i+tx_] ) ; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= cuConj( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); cuDoubleComplex res2; MAGMA_Z_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + 
la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); MAGMA_Z_SET2REAL(res,0); A-=half_thread_x *lda ; MAGMA_Z_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= cuConj(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { MAGMA_Z_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_Z_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda * blkc * thread_x; x= x - blkc * thread_x *incx ; A+=4 * ty* lda ; A+=tx; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; if( blkc * thread_x >=thread_x) #pragma unroll for(magma_int_t i=0; i<thread_x; i += thread_x ) { MAGMA_Z_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++) { #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j]; la[( j + ty * 4)][tx] = cuConj(tr[j]) * buff[tx]; } __syncthreads(); MAGMA_Z_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res_+=la[tx_][ty_*4+j] ; } b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(magma_int_t i=thread_x; i< (blkc * thread_x); i += thread_x ) { MAGMA_Z_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++) { #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += tr[j] * buff2[quarter_thread_x*k + ty*4+(j)]; la[( j + ty * 4)][tx] = cuConj( tr[j] )* buff[tx]; } __syncthreads(); MAGMA_Z_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j] ; b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( 
ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_zhemv_200_L_generic(magma_int_t n, cuDoubleComplex alpha, const cuDoubleComplex *A, magma_int_t lda, const cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t m_mod_thread_x) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; cuDoubleComplex res = MAGMA_Z_ZERO; cuDoubleComplex res_ = MAGMA_Z_ZERO; cuDoubleComplex res1 = MAGMA_Z_ZERO; __shared__ cuDoubleComplex la [quarter_thread_x][thread_x+2]; __shared__ cuDoubleComplex buff [thread_x]; __shared__ cuDoubleComplex buff2[thread_x]; cuDoubleComplex tr[4]; cuDoubleComplex b[8]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC+= break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += lda * ty_; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_Z_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx_; A += trackA ; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-=trackA; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_ ; ty = ty_ ; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_*4+4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = cuConj(la[0][i*bank_shift+tx_]) ; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[0][bank_shift*tx_+j+ty_*4])* buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res1,0); } __syncthreads(); MAGMA_Z_SET2REAL(res,0); if( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A+= trackA+half_thread_x*lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-= trackA+half_thread_x*lda ; A+=tx_ ; A+= half_thread_x + half_thread_x *lda ; 
} else { A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = cuConj(la[0][bank_shift*i+tx_]) ; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= cuConj(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); cuDoubleComplex res2; MAGMA_Z_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); MAGMA_Z_SET2REAL(res,0); MAGMA_Z_SET2REAL(res_,0); A-=half_thread_x *lda ; if( blkc == ( gridDim.x - 1 ) ) { A-=tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A+= trackA ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(tr[j/8], 99999); } else tr[j/8] = A[ j * lda]; A-=trackA; A+=tx_; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+= tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= cuConj(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_Z_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { MAGMA_Z_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_Z_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda*break_d; x= x - break_d *incx ; A+=4 * ty* lda ; if( blkc == ( gridDim.x - 1 ) ) { if(tx <= m_mod_thread_x ) A+=tx; else A+=m_mod_thread_x; } else{ A+=tx; } magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) b[j] = buff[ty_*4+j]; if( break_d > 0) #pragma unroll for(magma_int_t i=0; i< thread_x; i += thread_x ){ MAGMA_Z_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++){ #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = cuConj(tr[j]); } __syncthreads(); MAGMA_Z_SET2REAL(res_, 0) ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma 
unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(magma_int_t i=thread_x; i<break_d; i += thread_x ){ MAGMA_Z_SET2REAL(res_, 0) ; count++; if(ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++){ #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = cuConj(tr[j]); } __syncthreads(); MAGMA_Z_SET2REAL(res_, 0) ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx] ; WC[0+lda*(blkc)] = res; } } __global__ void magmablas_zhemv_200_L_update(magma_int_t n, cuDoubleComplex alpha, const cuDoubleComplex* A, magma_int_t lda, const cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * thread_x + tx ; cuDoubleComplex Ca; MAGMA_Z_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i<n; i+=thread_x){ Ca += WC[0] ; WC += thread_x; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_zhemv_200_L(magma_int_t m, cuDoubleComplex alpha, const cuDoubleComplex *A, magma_int_t lda, const cuDoubleComplex *X, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *Y, magma_int_t incy, cuDoubleComplex *dC_work) { magma_int_t blocks; if (m % zhemv_bs==0) blocks = m / zhemv_bs; else blocks = m / zhemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(zhemv_bs, 1, 1); /* * If matrix size is multiple of zhemv_bs, we use a specific code. * otherwise, we call the generic case. 
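 * (Added note: with zhemv_bs = 64, m = 1024 gives blocks = 16 and runs the
 * special kernel, while m = 1000 also gives blocks = 16 but runs the generic
 * kernel, since 1000 is not a multiple of 64.)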
*/ if(m % zhemv_bs == 0 ) { magmablas_zhemv_200_L_special <<< grid, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } else{ magma_int_t m_mod_thread_x = m%zhemv_bs - 1; magmablas_zhemv_200_L_generic <<< grid, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x); } magmablas_zhemv_200_L_update<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } #else /******************************************************************************* * Functions for each specific cases - Lower case nb = 32 */ __global__ void magmablas_zhemv_200_L_special_32_s( magma_int_t n, cuDoubleComplex alpha, cuDoubleComplex *A, magma_int_t lda, cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t nb) { if(blockIdx.y > blockIdx.x) return; magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; cuDoubleComplex res = MAGMA_Z_ZERO;// used in scan the row cuDoubleComplex res_ = MAGMA_Z_ZERO;// used in scan the column __shared__ cuDoubleComplex la [1056]; __shared__ cuDoubleComplex buff [zhemv_bs]; __shared__ cuDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blockIdx.x; A += break_d ; A += lda * ty + tx; A += lda * (blockIdx.y ) * zhemv_bs; // x += tx; if ( blockIdx.x == blockIdx.y ) // diagonal { x += (blockIdx.y * zhemv_bs) * incx; if( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++) { if ( i < tx ) { la[bank_shift * tx + i] = cuConj(la[ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; __syncthreads(); } else // non diagonal { x += (blockIdx.x * zhemv_bs) * incx; if( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x and store in buff; buff store its corresponding upper elements instead of buff2; x -= (blockIdx.x * zhemv_bs ) * incx; x += (blockIdx.y * zhemv_bs ) * incx; if( ty == 0 ) { buff2[tx] = x[0]; } // obtain the vector x store in buff2; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) { la[ bank_shift * (ty+j) + tx] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; // } __syncthreads(); la[bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.y * zhemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position } __syncthreads(); } // end if else la[bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.x * zhemv_bs + lda * blockIdx.y] = res; } } __global__ void magmablas_zhemv_200_L_special_32( magma_int_t n, cuDoubleComplex alpha, cuDoubleComplex *A, magma_int_t lda, cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t nb) { 
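    /* Added note (not in the original): each thread block of thread_x x thread_y
       = 32 x 8 threads owns one 32-row block of the lower triangle.  The diagonal
       block is applied directly to res, and the loop over the blocks to its left
       accumulates both the row contribution (res) and, through the conjugated
       loads, the column contribution that is staged in the WC workspace; the
       *_update_32 kernel defined later sums those staged partial results into y. */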
magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; cuDoubleComplex res = MAGMA_Z_ZERO;// used in scan the row cuDoubleComplex res_ = MAGMA_Z_ZERO;// used in scan the column cuDoubleComplex res1 = MAGMA_Z_ZERO;// tem for res cuDoubleComplex res2 = MAGMA_Z_ZERO;// tem for res_ __shared__ cuDoubleComplex la [16][64+2]; __shared__ cuDoubleComplex buff [zhemv_bs]; __shared__ cuDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += ty * lda + tx ; if( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; { A += lda * (blkc) * zhemv_bs; // change #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){ if ( i < tx ) { la[0][bank_shift * tx + i] = cuConj( la[0][ i * bank_shift + tx] ) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; __syncthreads(); A -= lda * (blkc) * zhemv_bs; } x -= blkc * zhemv_bs *incx ; x= x- tx*incx; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; WC += break_d + tx; if( blkc > 0) for(magma_int_t s=0; s< (blkc * zhemv_bs); s += zhemv_bs ) { MAGMA_Z_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += 1; x += zhemv_bs; A += lda * zhemv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_zhemv_200_L_generic_32_s( magma_int_t n, cuDoubleComplex alpha, cuDoubleComplex *A, magma_int_t lda, cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t m_mod_thread_x, magma_int_t nb) { if(blockIdx.y > blockIdx.x) return; magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; cuDoubleComplex res = MAGMA_Z_ZERO;// used in scan the row cuDoubleComplex res_ = MAGMA_Z_ZERO;// used in scan the column __shared__ cuDoubleComplex la [1056]; __shared__ cuDoubleComplex buff [zhemv_bs]; __shared__ cuDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blockIdx.x; A += break_d ; A += lda * ty; A += lda * (blockIdx.y ) * zhemv_bs; // x += tx; x += (blockIdx.x * zhemv_bs) * incx; magma_int_t trackA ; if( blockIdx.x == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_Z_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA 
; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx; A += trackA ; } __syncthreads(); if ( blockIdx.x == blockIdx.y) // diagonal { if( blockIdx.x == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ if( ( ty + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[bank_shift*(ty+j)+tx], 9999); } else la[bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ la[bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++) { if ( i < tx ) { la[bank_shift * tx + i] = cuConj(la[ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; __syncthreads(); } else // non diagonal { // obtain the vector x and store in buff; buff store its corresponding upper elements instead of buff2; x -= (blockIdx.x * zhemv_bs ) * incx; x += (blockIdx.y * zhemv_bs ) * incx; if( ty == 0 ) { buff2[tx] = x[0]; } // obtain the vector x store in buff2; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) { la[ bank_shift * (ty+j) + tx] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; // } __syncthreads(); la[bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.y * zhemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position } __syncthreads(); } // end if else la[bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res = la[tx*bank_shift+0]+la[tx*bank_shift+1] + la[tx*bank_shift+2]+la[tx*bank_shift+3] + la[tx*bank_shift+4]+la[tx*bank_shift+5] + la[tx*bank_shift+6]+la[tx*bank_shift+7]; WC[ tx + blockIdx.x * zhemv_bs + lda * blockIdx.y] = res; } } __global__ void magmablas_zhemv_200_L_generic_32(magma_int_t n, cuDoubleComplex alpha, cuDoubleComplex *A, magma_int_t lda, cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t m_mod_thread_x, magma_int_t nb) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; cuDoubleComplex res = MAGMA_Z_ZERO; cuDoubleComplex res_ = MAGMA_Z_ZERO; cuDoubleComplex res1 = MAGMA_Z_ZERO; cuDoubleComplex res2 = MAGMA_Z_ZERO; __shared__ cuDoubleComplex la [16][64+2]; __shared__ cuDoubleComplex buff [zhemv_bs]; __shared__ cuDoubleComplex buff2 [zhemv_bs]; magma_int_t break_d = zhemv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += lda * ty; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_Z_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx; A += trackA ; } { A += lda * (blkc) * zhemv_bs; // change // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ if( ( ty + j ) > m_mod_thread_x ) { MAGMA_Z_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else 
{ #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j+=8){ la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){ if ( i < tx ) { la[0][bank_shift*tx+i] = cuConj(la[0][i*bank_shift+tx]) ; } else la[0][bank_shift*tx+i] = la[0][bank_shift*tx+i] ; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += cuConj(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc) * zhemv_bs; } __syncthreads(); x= x - break_d *incx ; x= x - tx * incx ; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; WC += break_d + tx; if( blkc > 0) for(magma_int_t s=0; s< (blkc * zhemv_bs); s += zhemv_bs ) { MAGMA_Z_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j<zhemv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += 1; x += zhemv_bs; A += lda * zhemv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } __global__ void magmablas_zhemv_200_L_update_32_s(magma_int_t n, cuDoubleComplex alpha, cuDoubleComplex* A, magma_int_t lda, cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t nb ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * zhemv_bs + tx ; cuDoubleComplex Ca; MAGMA_Z_SET2REAL(Ca, 0) ; WC+= ind; for(i =0; i<n; i+=zhemv_bs){ Ca += WC[i/zhemv_bs * lda] ; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } __global__ void magmablas_zhemv_200_L_update_32(magma_int_t n, cuDoubleComplex alpha, cuDoubleComplex* A, magma_int_t lda, cuDoubleComplex *x, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *y, magma_int_t incy, cuDoubleComplex *WC, magma_int_t nb ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * zhemv_bs + tx ; cuDoubleComplex Ca; MAGMA_Z_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*zhemv_bs; i<n; i+=zhemv_bs){ Ca += WC[0] ; WC += zhemv_bs; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_zhemv_200_L_32(magma_int_t m, cuDoubleComplex alpha, cuDoubleComplex *A, magma_int_t lda, cuDoubleComplex *X, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *Y, magma_int_t incy, cuDoubleComplex *dC_work, magma_int_t nb) { magma_int_t blocks; if (m % zhemv_bs==0) blocks = m / zhemv_bs; else blocks = m / zhemv_bs + 1; dim3 grid(blocks, 1, 1); dim3 grid_s(blocks, blocks, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(zhemv_bs, 1, 1); /* * If matrix size is multiple of zhemv_bs, we use a specific code. * otherwise, we call the generic case. 
*/ if(m % zhemv_bs == 0 ) { if(m < SWITCH) magmablas_zhemv_200_L_special_32_s <<< grid_s, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); else magmablas_zhemv_200_L_special_32 <<< grid, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); } else{ magma_int_t m_mod_thread_x = m%zhemv_bs - 1; if(m < SWITCH) magmablas_zhemv_200_L_generic_32_s <<< grid_s, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb); else magmablas_zhemv_200_L_generic_32 <<< grid, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb); } if(m < SWITCH) magmablas_zhemv_200_L_update_32_s<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); else magmablas_zhemv_200_L_update_32<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb); } #endif /************************************************************************* Purpose ======= magmablas_zhemv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - COMPLEX*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - COMPLEX*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. 
*/ extern "C" magma_int_t magmablas_zhemv_200( char uplo, magma_int_t n, cuDoubleComplex alpha, const cuDoubleComplex *A, magma_int_t lda, const cuDoubleComplex *X, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *Y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) cublasZhemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); else { cuDoubleComplex *dC_work; magma_int_t blocks = n / zhemv_bs + (n % zhemv_bs != 0); magma_int_t workspace = lda * (blocks + 1); /* TODO: need to add a MAGMA context to handle workspaces */ cublasAlloc( workspace, sizeof(cuDoubleComplex), (void**)&dC_work ) ; cublasGetError( ) ; #ifdef NB_64 magmablas_zhemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); #else magmablas_zhemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work, zhemv_bs); #endif cublasFree(dC_work); cublasGetError( ) ; } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_zhemv2_200( char uplo, magma_int_t n, cuDoubleComplex alpha, const cuDoubleComplex *A, magma_int_t lda, const cuDoubleComplex *X, magma_int_t incx, cuDoubleComplex beta, cuDoubleComplex *Y, magma_int_t incy, cuDoubleComplex *work, int lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) cublasZhemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); else { magma_int_t blocks = n / zhemv_bs + (n % zhemv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_zhemv: passed %d, required %d\n", lwork, workspace); exit(1); } //printf("You are using zhemv_bs=%d\n", zhemv_bs); #ifdef NB_64 if( n < 1622) cublasZhemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); else magmablas_zhemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, work); #else magmablas_zhemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, work, zhemv_bs); #endif } return MAGMA_SUCCESS; } #endif /* (GPUSHMEM >= 200) */
2e13f0d8e636d923e58ad25a6150b49561536664.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### // Exercise 3 // Written by: Jiho Yang, M.Sc. Computational Science & Engineering // Matriculation number: 03675799 #include "helper.h" #include <iostream> using namespace std; // uncomment to use the camera //#define CAMERA __global__ void gamma_correction(float *d_imgOut, float *d_imgIn, int sizeImg, float gamma){ int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < sizeImg){ d_imgOut[i] = pow(d_imgIn[i], gamma); } } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); // Time start // ### // ### // ### TODO: Main computation // ### // ### int sizeImg = (int)w*h*nc; size_t nbytes = (size_t)(sizeImg)*sizeof(float); float gamma = 3.1f; ///////////////////////////////// Gamma correction - CPU Computation ///////////////////////////////// /* for (int i = 0; i < sizeImg; i++){ imgOut[i] = pow(imgIn[i], gamma); } cout << "Processor - CPU" << endl; */ ////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// Gamma correction - GPU Computation ///////////////////////////////// float *d_imgOut = NULL; float *d_imgIn = NULL; hipMalloc(&d_imgIn, nbytes); CUDA_CHECK; hipMalloc(&d_imgOut, nbytes); CUDA_CHECK; hipMemcpy(d_imgIn, imgIn, nbytes, hipMemcpyHostToDevice); CUDA_CHECK; // Launch kernel dim3 block = dim3(128, 1, 1); dim3 grid = dim3((sizeImg+block.x-1)/block.x, 1, 1); // Execute gamma correction Timer timer; timer.start(); hipLaunchKernelGGL(( gamma_correction) , dim3(grid), dim3(block), 0, 0, d_imgOut, d_imgIn, sizeImg, gamma); timer.end(); float t = timer.get(); // elapsed time in seconds // Copy back to CPU hipMemcpy(imgOut, d_imgOut, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK; hipFree(d_imgOut); CUDA_CHECK; cout << "Processor - GPU" << endl; ////////////////////////////////////////////////////////////////////////////////////////////////////// cout << "time: " << t*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
2e13f0d8e636d923e58ad25a6150b49561536664.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### // Exercise 3 // Written by: Jiho Yang, M.Sc. Computational Science & Engineering // Matriculation number: 03675799 #include "helper.h" #include <iostream> using namespace std; // uncomment to use the camera //#define CAMERA __global__ void gamma_correction(float *d_imgOut, float *d_imgIn, int sizeImg, float gamma){ int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < sizeImg){ d_imgOut[i] = pow(d_imgIn[i], gamma); } } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); // Time start // ### // ### // ### TODO: Main computation // ### // ### int sizeImg = (int)w*h*nc; size_t nbytes = (size_t)(sizeImg)*sizeof(float); float gamma = 3.1f; ///////////////////////////////// Gamma correction - CPU Computation ///////////////////////////////// /* for (int i = 0; i < sizeImg; i++){ imgOut[i] = pow(imgIn[i], gamma); } cout << "Processor - CPU" << endl; */ ////////////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// Gamma correction - GPU Computation ///////////////////////////////// float *d_imgOut = NULL; float *d_imgIn = NULL; cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK; cudaMalloc(&d_imgOut, nbytes); CUDA_CHECK; cudaMemcpy(d_imgIn, imgIn, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK; // Launch kernel dim3 block = dim3(128, 1, 1); dim3 grid = dim3((sizeImg+block.x-1)/block.x, 1, 1); // Execute gamma correction Timer timer; timer.start(); gamma_correction <<<grid, block>>> (d_imgOut, d_imgIn, sizeImg, gamma); timer.end(); float t = timer.get(); // elapsed time in seconds // Copy back to CPU cudaMemcpy(imgOut, d_imgOut, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK; cudaFree(d_imgOut); CUDA_CHECK; cout << "Processor - GPU" << endl; ////////////////////////////////////////////////////////////////////////////////////////////////////// cout << "time: " << t*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
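The comment above about interleaved (rgb rgb rgb ...) versus layered (rrr ... ggg ... bbb ...) storage is the one step the exercise delegates to helper.h. The sketch below shows one plausible host-side index mapping for that conversion; the actual convert_mat_to_layered in the course's helper.h is not shown here and may differ in details such as OpenCV's BGR channel order, so the function name and layout below are illustrative assumptions.

// Illustrative interleaved-to-layered index mapping; not the helper.h implementation.
#include <cstddef>

void interleaved_to_layered_sketch(float *layered, const float *interleaved,
                                   int w, int h, int nc)
{
    for (int c = 0; c < nc; ++c)
        for (int y = 0; y < h; ++y)
            for (int x = 0; x < w; ++x)
                // layered: one contiguous w*h plane per channel
                // interleaved: the nc channel values of each pixel stored next to each other
                layered[(size_t)w * h * c + (size_t)w * y + x] =
                    interleaved[nc * ((size_t)w * y + x) + c];
}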
70d98bcd7e6a1e599846a58e6ecb694aea6dedd5.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2019 by Contributors * \file simple_dmatrix.cu */ #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <xgboost/data.h> #include "../common/random.h" #include "./simple_dmatrix.h" #include "device_adapter_hip.cuh" namespace xgboost { namespace data { template <typename AdapterBatchT> void CountRowOffsets(const AdapterBatchT& batch, common::Span<bst_row_t> offset, int device_idx, float missing) { IsValidFunctor is_valid(missing); // Count elements per row dh::LaunchN(device_idx, batch.Size(), [=] __device__(size_t idx) { auto element = batch.GetElement(idx); if (is_valid(element)) { atomicAdd(reinterpret_cast<unsigned long long*>( // NOLINT &offset[element.row_idx]), static_cast<unsigned long long>(1)); // NOLINT } }); dh::XGBCachingDeviceAllocator<char> alloc; thrust::exclusive_scan(thrust::hip::par(alloc), thrust::device_pointer_cast(offset.data()), thrust::device_pointer_cast(offset.data() + offset.size()), thrust::device_pointer_cast(offset.data())); } template <typename AdapterBatchT> struct COOToEntryOp { AdapterBatchT batch; __device__ Entry operator()(size_t idx) { const auto& e = batch.GetElement(idx); return Entry(e.column_idx, e.value); } }; // Here the data is already correctly ordered and simply needs to be compacted // to remove missing data template <typename AdapterT> void CopyDataToDMatrix(AdapterT* adapter, common::Span<Entry> data, float missing) { auto batch = adapter->Value(); auto counting = thrust::make_counting_iterator(0llu); dh::XGBCachingDeviceAllocator<char> alloc; COOToEntryOp<decltype(batch)> transform_op{batch}; thrust::transform_iterator<decltype(transform_op), decltype(counting)> transform_iter(counting, transform_op); auto begin_output = thrust::device_pointer_cast(data.data()); dh::CopyIf(transform_iter, transform_iter + batch.Size(), begin_output, IsValidFunctor(missing)); } // Does not currently support metainfo as no on-device data source contains this // Current implementation assumes a single batch. More batches can // be supported in future. Does not currently support inferring row/column size template <typename AdapterT> SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread) { dh::safe_cuda(hipSetDevice(adapter->DeviceIdx())); CHECK(adapter->NumRows() != kAdapterUnknownSize); CHECK(adapter->NumColumns() != kAdapterUnknownSize); adapter->BeforeFirst(); adapter->Next(); auto& batch = adapter->Value(); sparse_page_.offset.SetDevice(adapter->DeviceIdx()); sparse_page_.data.SetDevice(adapter->DeviceIdx()); // Enforce single batch CHECK(!adapter->Next()); sparse_page_.offset.Resize(adapter->NumRows() + 1); auto s_offset = sparse_page_.offset.DeviceSpan(); CountRowOffsets(batch, s_offset, adapter->DeviceIdx(), missing); info_.num_nonzero_ = sparse_page_.offset.HostVector().back(); sparse_page_.data.Resize(info_.num_nonzero_); CopyDataToDMatrix(adapter, sparse_page_.data.DeviceSpan(), missing); info_.num_col_ = adapter->NumColumns(); info_.num_row_ = adapter->NumRows(); // Synchronise worker columns rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1); } template SimpleDMatrix::SimpleDMatrix(CudfAdapter* adapter, float missing, int nthread); template SimpleDMatrix::SimpleDMatrix(CupyAdapter* adapter, float missing, int nthread); } // namespace data } // namespace xgboost
70d98bcd7e6a1e599846a58e6ecb694aea6dedd5.cu
/*! * Copyright 2019 by Contributors * \file simple_dmatrix.cu */ #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/sort.h> #include <xgboost/data.h> #include "../common/random.h" #include "./simple_dmatrix.h" #include "device_adapter.cuh" namespace xgboost { namespace data { template <typename AdapterBatchT> void CountRowOffsets(const AdapterBatchT& batch, common::Span<bst_row_t> offset, int device_idx, float missing) { IsValidFunctor is_valid(missing); // Count elements per row dh::LaunchN(device_idx, batch.Size(), [=] __device__(size_t idx) { auto element = batch.GetElement(idx); if (is_valid(element)) { atomicAdd(reinterpret_cast<unsigned long long*>( // NOLINT &offset[element.row_idx]), static_cast<unsigned long long>(1)); // NOLINT } }); dh::XGBCachingDeviceAllocator<char> alloc; thrust::exclusive_scan(thrust::cuda::par(alloc), thrust::device_pointer_cast(offset.data()), thrust::device_pointer_cast(offset.data() + offset.size()), thrust::device_pointer_cast(offset.data())); } template <typename AdapterBatchT> struct COOToEntryOp { AdapterBatchT batch; __device__ Entry operator()(size_t idx) { const auto& e = batch.GetElement(idx); return Entry(e.column_idx, e.value); } }; // Here the data is already correctly ordered and simply needs to be compacted // to remove missing data template <typename AdapterT> void CopyDataToDMatrix(AdapterT* adapter, common::Span<Entry> data, float missing) { auto batch = adapter->Value(); auto counting = thrust::make_counting_iterator(0llu); dh::XGBCachingDeviceAllocator<char> alloc; COOToEntryOp<decltype(batch)> transform_op{batch}; thrust::transform_iterator<decltype(transform_op), decltype(counting)> transform_iter(counting, transform_op); auto begin_output = thrust::device_pointer_cast(data.data()); dh::CopyIf(transform_iter, transform_iter + batch.Size(), begin_output, IsValidFunctor(missing)); } // Does not currently support metainfo as no on-device data source contains this // Current implementation assumes a single batch. More batches can // be supported in future. Does not currently support inferring row/column size template <typename AdapterT> SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread) { dh::safe_cuda(cudaSetDevice(adapter->DeviceIdx())); CHECK(adapter->NumRows() != kAdapterUnknownSize); CHECK(adapter->NumColumns() != kAdapterUnknownSize); adapter->BeforeFirst(); adapter->Next(); auto& batch = adapter->Value(); sparse_page_.offset.SetDevice(adapter->DeviceIdx()); sparse_page_.data.SetDevice(adapter->DeviceIdx()); // Enforce single batch CHECK(!adapter->Next()); sparse_page_.offset.Resize(adapter->NumRows() + 1); auto s_offset = sparse_page_.offset.DeviceSpan(); CountRowOffsets(batch, s_offset, adapter->DeviceIdx(), missing); info_.num_nonzero_ = sparse_page_.offset.HostVector().back(); sparse_page_.data.Resize(info_.num_nonzero_); CopyDataToDMatrix(adapter, sparse_page_.data.DeviceSpan(), missing); info_.num_col_ = adapter->NumColumns(); info_.num_row_ = adapter->NumRows(); // Synchronise worker columns rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1); } template SimpleDMatrix::SimpleDMatrix(CudfAdapter* adapter, float missing, int nthread); template SimpleDMatrix::SimpleDMatrix(CupyAdapter* adapter, float missing, int nthread); } // namespace data } // namespace xgboost
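CountRowOffsets above uses a two-step idiom: a kernel atomically counts the valid (non-missing) entries landing in each row, then an exclusive prefix sum turns those counts into CSR row offsets. The standalone sketch below shows the same idiom stripped of XGBoost's adapter machinery; the COO layout and the missing-value test are simplified stand-ins for the batch and IsValidFunctor used above, not XGBoost code.

// Count-then-exclusive-scan sketch (illustration only).
#include <thrust/device_vector.h>
#include <thrust/scan.h>

__global__ void CountPerRow(const int *row_idx, const float *value, float missing,
                            size_t nnz, unsigned long long *counts) {
  size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (i >= nnz) return;
  if (!isnan(value[i]) && value[i] != missing)             // simplified validity test
    atomicAdd(&counts[row_idx[i]], 1ULL);
}

void BuildOffsets(thrust::device_vector<unsigned long long>* offsets,  // size rows+1, zero-initialized
                  const int *row_idx, const float *value, float missing, size_t nnz) {
  const int threads = 256;
  const int blocks = static_cast<int>((nnz + threads - 1) / threads);
  CountPerRow<<<blocks, threads>>>(row_idx, value, missing, nnz,
                                   thrust::raw_pointer_cast(offsets->data()));
  // Per-row counts {3, 0, 2, 0} become offsets {0, 3, 3, 5}: row i occupies
  // [offsets[i], offsets[i+1]) in the compacted data array.
  thrust::exclusive_scan(offsets->begin(), offsets->end(), offsets->begin());
}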
5909337912d46b5e1a43ca505e1a6950c710cd6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/imageProcessing/NppiHelper.h" // #include "saiga/core/framework/framework.h" #include "saiga/core/image/all.h" #include "saiga/cuda/CudaInfo.h" #include "saiga/cuda/imageProcessing/image.h" #include "gtest/gtest.h" #include "compare_numbers.h" namespace Saiga { TEST(CudaSimple, Memcpy) { int N = 10000; std::vector<int> h_data(N); for (auto& i : h_data) { i = Random::uniformInt(0, 100000); } size_t size = sizeof(int) * N; int* d_data; hipMalloc((void**)&d_data, size); int* d_data2; hipMalloc((void**)&d_data2, size); hipMemcpy(d_data, h_data.data(), size, hipMemcpyHostToDevice); hipMemcpy(d_data2, d_data, size, hipMemcpyDeviceToDevice); std::vector<int> h_data2(N); hipMemcpy(h_data2.data(), d_data2, size, hipMemcpyDeviceToHost); EXPECT_EQ(h_data, h_data2); } __global__ static void addFive(int* data, int N) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= N) return; data[tid] = data[tid] + 5; } TEST(CudaSimple, AddFive) { int N = 10000; std::vector<int> h_data(N); for (auto& i : h_data) { i = Random::uniformInt(0, 100000); } size_t size = sizeof(int) * N; int* d_data; hipMalloc((void**)&d_data, size); hipMemcpy(d_data, h_data.data(), size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( addFive), dim3(iDivUp(N, 128)), dim3(128), 0, 0, d_data, N); std::vector<int> h_data2(N); hipMemcpy(h_data2.data(), d_data, size, hipMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { EXPECT_EQ(h_data2[i], h_data[i] + 5); } } } // namespace Saiga int main() { Saiga::CUDA::initCUDA(); Saiga::CUDA::printCUDAInfo(); Saiga::initSaigaSampleNoWindow(); testing::InitGoogleTest(); return RUN_ALL_TESTS(); }
5909337912d46b5e1a43ca505e1a6950c710cd6b.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/cuda/imageProcessing/NppiHelper.h" // #include "saiga/core/framework/framework.h" #include "saiga/core/image/all.h" #include "saiga/cuda/CudaInfo.h" #include "saiga/cuda/imageProcessing/image.h" #include "gtest/gtest.h" #include "compare_numbers.h" namespace Saiga { TEST(CudaSimple, Memcpy) { int N = 10000; std::vector<int> h_data(N); for (auto& i : h_data) { i = Random::uniformInt(0, 100000); } size_t size = sizeof(int) * N; int* d_data; cudaMalloc((void**)&d_data, size); int* d_data2; cudaMalloc((void**)&d_data2, size); cudaMemcpy(d_data, h_data.data(), size, cudaMemcpyHostToDevice); cudaMemcpy(d_data2, d_data, size, cudaMemcpyDeviceToDevice); std::vector<int> h_data2(N); cudaMemcpy(h_data2.data(), d_data2, size, cudaMemcpyDeviceToHost); EXPECT_EQ(h_data, h_data2); } __global__ static void addFive(int* data, int N) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid >= N) return; data[tid] = data[tid] + 5; } TEST(CudaSimple, AddFive) { int N = 10000; std::vector<int> h_data(N); for (auto& i : h_data) { i = Random::uniformInt(0, 100000); } size_t size = sizeof(int) * N; int* d_data; cudaMalloc((void**)&d_data, size); cudaMemcpy(d_data, h_data.data(), size, cudaMemcpyHostToDevice); addFive<<<iDivUp(N, 128), 128>>>(d_data, N); std::vector<int> h_data2(N); cudaMemcpy(h_data2.data(), d_data, size, cudaMemcpyDeviceToHost); for (int i = 0; i < N; ++i) { EXPECT_EQ(h_data2[i], h_data[i] + 5); } } } // namespace Saiga int main() { Saiga::CUDA::initCUDA(); Saiga::CUDA::printCUDAInfo(); Saiga::initSaigaSampleNoWindow(); testing::InitGoogleTest(); return RUN_ALL_TESTS(); }
a876a2f50a6ef3c782e17bf7c801ed21876935c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> //Code from https://devblogs.nvidia.com/even-easier-introduction-cuda/ //Saved for reference // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
a876a2f50a6ef3c782e17bf7c801ed21876935c9.cu
#include <iostream> #include <math.h> //Code from https://devblogs.nvidia.com/even-easier-introduction-cuda/ //Saved for reference // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU add<<<1, 1>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
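The snippet above launches add<<<1, 1>>>, so the whole 1M-element loop runs on a single GPU thread; that is the deliberately slow starting point of the blog post it cites. The usual next step from that same tutorial is a grid-stride version, sketched below for reference; it is not part of the saved snippet.

// Grid-stride variant of add(); each thread handles every stride-th element.
__global__ void add_parallel(int n, float *x, float *y)
{
  int index  = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

// Replacing the single-thread launch in main():
//   int blockSize = 256;
//   int numBlocks = (N + blockSize - 1) / blockSize;
//   add_parallel<<<numBlocks, blockSize>>>(N, x, y);
//   cudaDeviceSynchronize();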
c81a558e35e0d1a706234970af7c69f5297081e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/math/sequence2batch.h" namespace paddle { namespace operators { namespace math { template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index, int64_t height, int64_t width, bool is_src_index) { int idx = threadIdx.x; int idy = threadIdx.y; int id = blockIdx.x + idy * GridDimX; while (id < height) { int src_idx = is_src_index ? index[id] : id; int dst_idx = is_src_index ? id : index[id]; const T* src_data = src + src_idx * width; T* dst_data = dst + dst_idx * width; for (int i = idx; i < width; i += BlockDimX) { dst_data[i] = src_data[i]; } id += BlockDimY * GridDimX; } } template <typename T> class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, const size_t* index, framework::Tensor& dst, bool is_src_index) { auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, "The dst must be matrix with rank 2."); PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], "The width of src and dst must be same."); auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data<T>(); auto* dst_data = dst.data<T>(); dim3 threads(128, 8); dim3 grid(8, 1); auto stream = context.stream(); hipLaunchKernelGGL(( CopyMatrixRowsKernel<T, 128, 8, 8>), dim3(grid), dim3(threads), 0, stream, src_data, dst_data, index, height, width, is_src_index); } }; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, float>; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, double>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, float>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, double>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, float>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
c81a558e35e0d1a706234970af7c69f5297081e1.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/math/sequence2batch.h" namespace paddle { namespace operators { namespace math { template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index, int64_t height, int64_t width, bool is_src_index) { int idx = threadIdx.x; int idy = threadIdx.y; int id = blockIdx.x + idy * GridDimX; while (id < height) { int src_idx = is_src_index ? index[id] : id; int dst_idx = is_src_index ? id : index[id]; const T* src_data = src + src_idx * width; T* dst_data = dst + dst_idx * width; for (int i = idx; i < width; i += BlockDimX) { dst_data[i] = src_data[i]; } id += BlockDimY * GridDimX; } } template <typename T> class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, const size_t* index, framework::Tensor& dst, bool is_src_index) { auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, "The dst must be matrix with rank 2."); PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1], "The width of src and dst must be same."); auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data<T>(); auto* dst_data = dst.data<T>(); dim3 threads(128, 8); dim3 grid(8, 1); auto stream = context.stream(); CopyMatrixRowsKernel<T, 128, 8, 8><<<grid, threads, 0, stream>>>( src_data, dst_data, index, height, width, is_src_index); } }; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, float>; template class CopyMatrixRowsFunctor<platform::CUDADeviceContext, double>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, float>; template class LoDTensor2BatchFunctor<platform::CUDADeviceContext, double>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, float>; template class Batch2LoDTensorFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
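CopyMatrixRowsKernel above either gathers rows (is_src_index == true: destination row id comes from the loop index, source row from index[id]) or scatters them (the reverse). A plain host-side reference of that contract, useful for checking the kernel, might look like the sketch below; it is an illustration, not part of Paddle.

// Host-side reference (illustration only) of the kernel's gather/scatter contract.
#include <cstdint>
#include <cstddef>

void CopyMatrixRowsRef(const float *src, float *dst, const size_t *index,
                       int64_t height, int64_t width, bool is_src_index) {
  for (int64_t id = 0; id < height; ++id) {
    int64_t src_row = is_src_index ? static_cast<int64_t>(index[id]) : id;  // gather
    int64_t dst_row = is_src_index ? id : static_cast<int64_t>(index[id]);  // scatter
    for (int64_t i = 0; i < width; ++i)
      dst[dst_row * width + i] = src[src_row * width + i];
  }
}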
42b5b3eceb4300a191de16456f8a6fd07e911596.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Host side code that calls a GPU kernel to perform vector addition on the GPU using a single thread block. We restrict the size of the vector to be up to 1024 elements which is the maximum thread block size on this GPU. Author: Naga Kandasamy Date modified: 02/18/2018 */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #define NUM_ELEMENTS 1024 /* Include the kernel code during the compiler preprocessing step. */ #include "vector_addition_kernel.cu" void run_test (void); void compute_on_device (float *, float *, float *, int); extern "C" void compute_gold (float *, float *, float *, int); int main (int argc, char** argv) { run_test (); return 0; } /* Perform vector addition on the CPU and the GPU. */ void run_test (void) { int num_elements = NUM_ELEMENTS; float diff; int i; /* Allocate memory on the CPU for the input vectors A and B, and the output vector C. */ int vector_length = sizeof(float) * num_elements; float *A = (float *)malloc (vector_length); float *B = (float *)malloc (vector_length); float *gold_result = (float *)malloc (vector_length); /* The result vector computed on the CPU. */ float *gpu_result = (float *)malloc (vector_length); /* The result vector computed on the GPU. */ /* Randomly generate input data. Initialize the input data to be integer values between 0 and 100. */ for(i = 0; i < num_elements; i++){ A[i] = floorf (100*(rand()/(float)RAND_MAX)); B[i] = floorf (100*(rand()/(float)RAND_MAX)); } /* Compute the reference solution on the CPU. */ compute_gold (A, B, gold_result, num_elements); /* Compute the result vector on the GPU. */ compute_on_device (A, B, gpu_result, num_elements); /* Compute the differences between the CPU and GPU results. */ diff = 0.0; for(i = 0; i < num_elements; i++) diff += fabs (gold_result[i] - gpu_result[i]); printf("Difference between the CPU and GPU result: %f. \n", diff); /* Cleanup memory. */ free(A); free(B); free(gold_result); free(gpu_result); return; } /* Vector addition on GPU. */ void compute_on_device (float *A_on_host, float *B_on_host, float *gpu_result, int num_elements) { float *A_on_device = NULL; float *B_on_device = NULL; float *C_on_device = NULL; /* Allocate space on the GPU for vectors A and B, and copy the contents of the vectors to the GPU. */ hipMalloc ((void**)&A_on_device, num_elements*sizeof(float)); hipMemcpy (A_on_device, A_on_host, num_elements*sizeof(float), hipMemcpyHostToDevice); hipMalloc ((void**)&B_on_device, num_elements*sizeof(float)); hipMemcpy (B_on_device, B_on_host, num_elements*sizeof(float), hipMemcpyHostToDevice); /* Allocate space for the result vector on the GPU. */ hipMalloc ((void**)&C_on_device, num_elements*sizeof(float)); /* Set up the execution grid on the GPU. */ dim3 thread_block(num_elements, 1, 1); /* Set the number of threads in the thread block. */ dim3 grid(1,1); hipLaunchKernelGGL(( vector_addition_kernel), dim3(grid), dim3(thread_block), 0, 0, A_on_device, B_on_device, C_on_device, num_elements); /* Copy the result vector back from the GPU. */ hipMemcpy (gpu_result, C_on_device, num_elements*sizeof(float), hipMemcpyDeviceToHost); /* Free memory on the GPU. */ hipFree (A_on_device); hipFree (B_on_device); hipFree (C_on_device); }
42b5b3eceb4300a191de16456f8a6fd07e911596.cu
/* Host side code that calls a GPU kernel to perform vector addition on the GPU using a single thread block. We restrict the size of the vector to be up to 1024 elements which is the maximum thread block size on this GPU. Author: Naga Kandasamy Date modified: 02/18/2018 */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #define NUM_ELEMENTS 1024 /* Include the kernel code during the compiler preprocessing step. */ #include "vector_addition_kernel.cu" void run_test (void); void compute_on_device (float *, float *, float *, int); extern "C" void compute_gold (float *, float *, float *, int); int main (int argc, char** argv) { run_test (); return 0; } /* Perform vector addition on the CPU and the GPU. */ void run_test (void) { int num_elements = NUM_ELEMENTS; float diff; int i; /* Allocate memory on the CPU for the input vectors A and B, and the output vector C. */ int vector_length = sizeof(float) * num_elements; float *A = (float *)malloc (vector_length); float *B = (float *)malloc (vector_length); float *gold_result = (float *)malloc (vector_length); /* The result vector computed on the CPU. */ float *gpu_result = (float *)malloc (vector_length); /* The result vector computed on the GPU. */ /* Randomly generate input data. Initialize the input data to be integer values between 0 and 100. */ for(i = 0; i < num_elements; i++){ A[i] = floorf (100*(rand()/(float)RAND_MAX)); B[i] = floorf (100*(rand()/(float)RAND_MAX)); } /* Compute the reference solution on the CPU. */ compute_gold (A, B, gold_result, num_elements); /* Compute the result vector on the GPU. */ compute_on_device (A, B, gpu_result, num_elements); /* Compute the differences between the CPU and GPU results. */ diff = 0.0; for(i = 0; i < num_elements; i++) diff += fabs (gold_result[i] - gpu_result[i]); printf("Difference between the CPU and GPU result: %f. \n", diff); /* Cleanup memory. */ free(A); free(B); free(gold_result); free(gpu_result); return; } /* Vector addition on GPU. */ void compute_on_device (float *A_on_host, float *B_on_host, float *gpu_result, int num_elements) { float *A_on_device = NULL; float *B_on_device = NULL; float *C_on_device = NULL; /* Allocate space on the GPU for vectors A and B, and copy the contents of the vectors to the GPU. */ cudaMalloc ((void**)&A_on_device, num_elements*sizeof(float)); cudaMemcpy (A_on_device, A_on_host, num_elements*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc ((void**)&B_on_device, num_elements*sizeof(float)); cudaMemcpy (B_on_device, B_on_host, num_elements*sizeof(float), cudaMemcpyHostToDevice); /* Allocate space for the result vector on the GPU. */ cudaMalloc ((void**)&C_on_device, num_elements*sizeof(float)); /* Set up the execution grid on the GPU. */ dim3 thread_block(num_elements, 1, 1); /* Set the number of threads in the thread block. */ dim3 grid(1,1); vector_addition_kernel<<<grid, thread_block>>>(A_on_device, B_on_device, C_on_device, num_elements); /* Copy the result vector back from the GPU. */ cudaMemcpy (gpu_result, C_on_device, num_elements*sizeof(float), cudaMemcpyDeviceToHost); /* Free memory on the GPU. */ cudaFree (A_on_device); cudaFree (B_on_device); cudaFree (C_on_device); }
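The host code above pins the computation to a single thread block, which is why the vector length is capped at 1024 (the maximum block size noted in the header comment). A hedged sketch of the multi-block generalization follows; the kernel here is written for illustration and is not the vector_addition_kernel.cu that the file includes.

// Illustrative multi-block kernel (not the included vector_addition_kernel.cu).
__global__ void vector_addition_multiblock(const float *A, const float *B,
                                           float *C, int num_elements)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_elements)               /* guard the final partial block */
        C[i] = A[i] + B[i];
}

/* Host side, replacing the single-block launch above:
     dim3 thread_block(256, 1, 1);
     dim3 grid((num_elements + thread_block.x - 1) / thread_block.x, 1, 1);
     vector_addition_multiblock<<<grid, thread_block>>>(A_on_device, B_on_device,
                                                        C_on_device, num_elements);
*/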
copypipeline.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/copypipeline.cuh" //#include "gpu_util_hip.cuh" using namespace std; /* ========================= * ICopySegment * ========================= */ ICopySegment::ICopySegment(IBroadcastNetwork& parent, int deviceID, Queue<int>* finishQueue) : _parent(&parent), _prev(NULL), _stream(NULL), _deviceID(deviceID), _finishQueue(finishQueue), Thread(true, getDeviceCPUs(parent.getSourceDeviceID())) { _execDeviceID = _deviceID; } ICopySegment::~ICopySegment() { if (_stream != NULL) { checkCudaErrors(hipStreamDestroy(_stream)); } } void* ICopySegment::run() { assert(_execDeviceID != DEVICE_HOST); NVMatrix::setDeviceID(_execDeviceID); checkCudaErrors(hipStreamCreateWithFlags(&_stream, hipStreamNonBlocking)); bool exit = false; while (!exit) { CopyMessage& msg = *_queue.dequeue(); if (msg.getType() == CopyMessage::EXIT) { exit = true; } else { bool term = processMessage(msg); if (term) { assert(_finishQueue != NULL); _finishQueue->enqueue(1); } } delete &msg; } return NULL; } NVMatrix& ICopySegment::getChunk(NVMatrix& mat, int chunkSize, int chunkIdx) { NVMatrix& line = mat.reshaped(1, mat.getNumElements()); int start = chunkIdx * chunkSize; int end = min((chunkIdx+1) * chunkSize, mat.getNumElements()); NVMatrix& chunk = line.sliceCols(start, end); delete &line; return chunk; } inline NVMatrix& ICopySegment::getMatrix(CopyMessage& msg) { if (getDeviceID() == DEVICE_HOST) { return _hmat; } return msg.getMatrix(getDeviceID()); } Queue<CopyMessage*>& ICopySegment::getQueue() { return _queue; } inline int ICopySegment::getDeviceID() { return _deviceID; } void ICopySegment::addPrev(ICopySegment& c) { _prev = &c; if (_deviceID == DEVICE_HOST) { _execDeviceID = c.getDeviceID(); } } void ICopySegment::addNext(CopyPeer& c) { _next.push_back(&c); c.addPrev(*this); } bool ICopySegment::isTerminal() const { return _next.size() == 0; } /* ========================= * CopySource * ========================= */ CopySource::CopySource(IBroadcastNetwork& parent, int deviceID) : ICopySegment(parent, deviceID, NULL) { } bool CopySource::processMessage(CopyMessage& msg) { assert(msg.getType() == CopyMessage::COPY_START); int numChunks = min(getMatrix(msg).getNumElements(), max(COPY_MIN_CHUNKS, min(COPY_MAX_CHUNKS, DIVUP(getMatrix(msg).getNumElements(), COPY_MIN_CHUNK_SIZE)))); int chunkSize = DIVUP(getMatrix(msg).getNumElements(), numChunks); // printf("num chunks: %d\n", numChunks); for (int c = 0; c <= numChunks; ++c) { for (vector<CopyPeer*>::const_iterator it = _next.begin(); it != _next.end(); ++it) { (*it)->getQueue().enqueue(new CopyChunkMessage(c, chunkSize, numChunks, msg.getScaleSource(), msg.getScaleTargets(), msg.getMatrices())); } } return false; } inline bool CopySource::isSource() const { return true; } /* ========================= * CopyPeer * ========================= */ CopyPeer::CopyPeer(IBroadcastNetwork& parent, int deviceID, Queue<int>* 
finishQueue) : ICopySegment(parent, deviceID, finishQueue) { } bool CopyPeer::processMessage(CopyMessage& msg) { assert(msg.getType() == CopyMessage::COPY_CHUNK); CopyChunkMessage& cmsg = *static_cast<CopyChunkMessage*>(&msg); if (cmsg.getChunkIdx() < cmsg.getNumChunks()) { if (!isTerminal() || (isTerminal() && msg.getScaleTargets() == 0)) { getMatrix(msg).resize(_prev->getMatrix(msg)); } // getMatrix(msg).printShape("getMatrix(msg)"); // _prev->getMatrix(msg).printShape("_prev->getMatrix(msg)"); assert(getMatrix(msg).isSameDims(_prev->getMatrix(msg))); const float scaleSelf = isTerminal() ? msg.getScaleTargets() : 0; const float scalePrev = _prev->isSource() ? msg.getScaleSource() : 1; NVMatrix& prevChunk = getChunk(_prev->getMatrix(msg), cmsg.getChunkSize(), cmsg.getChunkIdx()); NVMatrix& myChunk = getChunk(getMatrix(msg), cmsg.getChunkSize(), cmsg.getChunkIdx()); prevChunk.add(myChunk, scalePrev, scaleSelf, myChunk, _stream); NVMatrix::syncStream(_stream); delete &prevChunk; delete &myChunk; } for (vector<CopyPeer*>::const_iterator it = _next.begin(); it != _next.end(); ++it) { (*it)->getQueue().enqueue(new CopyChunkMessage(cmsg)); } return cmsg.getChunkIdx() >= cmsg.getNumChunks() && isTerminal(); } inline bool CopyPeer::isSource() const { return false; } /* ========================= * IBroadcastNetwork * ========================= */ IBroadcastNetwork& IBroadcastNetwork::make(set<int> devices, int srcDevice) { if (devices.size() == 8) { return (new EightGPUBroadcaster1(devices, srcDevice))->construct(); } else if (devices.size() == 1) { return (new NullBroadcaster(devices, srcDevice))->construct(); } else if (devices.size() == 2 && NVMatrix::canAccessPeer(*devices.begin(), *(++devices.begin()))) { return (new TwoPeeringGPUsBroadcaster(devices, srcDevice))->construct(); } return (new NaiveBroadcaster(devices, srcDevice))->construct(); } IBroadcastNetwork::IBroadcastNetwork(set<int>& devices, int srcDeviceID, int numTerminal) : _devices(devices), _srcDeviceID(srcDeviceID), _numTerminal(numTerminal), _constructed(false), _src(NULL) { } IBroadcastNetwork::~IBroadcastNetwork() { vector<ICopySegment*> v; v.insert(v.end(), _peers.begin(), _peers.end()); v.insert(v.end(), _src); for (vector<ICopySegment*>::const_iterator it = v.begin(); it != v.end(); ++it) { if ((*it) != NULL) { (*it)->getQueue().enqueue(new CopyMessage(CopyMessage::EXIT)); (*it)->join(); delete *it; } } } IBroadcastNetwork& IBroadcastNetwork::construct() { assert(!_constructed); pair<vector<int>,vector<int> > gpus = makeGPULists(); _src = new CopySource(*this, _srcDeviceID); makePeers(gpus); makeConnections(); _src->start(); for (vector<CopyPeer*>::const_iterator it = _peers.begin(); it != _peers.end(); ++it) { (*it)->start(); } _constructed = true; return *this; } pair<vector<int>,vector<int> > IBroadcastNetwork::makeGPULists() { vector<int> same, other; for (set<int>::const_iterator it = _devices.begin(); it != _devices.end(); ++it) { if (*it != _srcDeviceID) { if (NVMatrix::canAccessPeer(_srcDeviceID, *it)) { same.insert(same.begin() + rand() % (1 + same.size()), *it); } else { other.insert(other.begin() + rand() % (1 + other.size()), *it); } } } return pair<vector<int>,vector<int> >(same, other); } void IBroadcastNetwork::broadcast(std::map<int, NVMatrix*>& mats) { _broadcast(mats, 1, 0); } void IBroadcastNetwork::_broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { assert(_constructed); assert(_finishQueue.getNumElements() == 0); assert(mats.size() == _devices.size()); assert(mats.size() > 
1); if (mats[_srcDeviceID]->getNumElements() == 0) { for (map<int,NVMatrix*>::const_iterator it = mats.begin(); it != mats.end(); ++it) { it->second->resize(*mats[_srcDeviceID]); } } else { _src->getQueue().enqueue(new CopyStartMessage(scaleSource, scaleTargets, mats)); for (int i = 0; i < _numTerminal; ++i) { _finishQueue.dequeue(); } } assert(_finishQueue.getNumElements() == 0); } int IBroadcastNetwork::getSourceDeviceID() const { return _srcDeviceID; } void IBroadcastNetwork::makePeers(pair<vector<int>,vector<int> >& gpus) { vector<int>& same = gpus.first, &other = gpus.second; for (int i = 0; i < same.size(); ++i) { _peers.push_back(new CopyPeer(*this, same[i], &_finishQueue)); } for (int i = 0; i < other.size(); ++i) { _peers.push_back(new CopyPeer(*this, other[i], &_finishQueue)); } _peers.push_back(new CopyPeer(*this, DEVICE_HOST, &_finishQueue)); // peers[7] } /* ========================= * ISafeBroadcastNetwork * ========================= */ ISafeBroadcastNetwork& ISafeBroadcastNetwork::make(set<int> devices, int srcDevice) { if (devices.size() == 1) { return (new NullBroadcaster(devices, srcDevice))->construct(); } else if (devices.size() == 2 && NVMatrix::canAccessPeer(*devices.begin(), *(++devices.begin()))) { return (new TwoPeeringGPUsBroadcaster(devices, srcDevice))->construct(); } return (new NaiveBroadcaster(devices, srcDevice))->construct(); } ISafeBroadcastNetwork::ISafeBroadcastNetwork(std::set<int>& devices, int srcDeviceID, int numTerminal) : IBroadcastNetwork(devices, srcDeviceID, numTerminal) { } void ISafeBroadcastNetwork::broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { _broadcast(mats, scaleSource, scaleTargets); } ISafeBroadcastNetwork& ISafeBroadcastNetwork::construct() { IBroadcastNetwork::construct(); return *this; } /* ========================= * NullBroadcaster * ========================= */ NullBroadcaster::NullBroadcaster(std::set<int>& devices, int srcDeviceID) : ISafeBroadcastNetwork(devices, srcDeviceID, 0) { } void NullBroadcaster::makeConnections() { } NullBroadcaster& NullBroadcaster::construct() { _constructed = true; return *this; } void NullBroadcaster::broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { } void NullBroadcaster::broadcast(std::map<int, NVMatrix*>& mats) { } /* ========================= * NaiveBroadcaster * ========================= * * This one does src -> host -> all */ NaiveBroadcaster::NaiveBroadcaster(std::set<int>& devices, int srcDeviceID) : ISafeBroadcastNetwork(devices, srcDeviceID, devices.size()-1) { } void NaiveBroadcaster::makeConnections() { _src->addNext(*_peers.back()); // Make connection src -> host for (int i = 0; i < _peers.size() - 1; ++i) { if (_peers[i]->getDeviceID() != _src->getDeviceID()) { _peers.back()->addNext(*_peers[i]); // Make connection host -> peer } } } /* ========================= * EightGPUBroadcaster1 * ========================= * * This one does a fancy graph */ EightGPUBroadcaster1::EightGPUBroadcaster1(set<int>& devices, int srcDeviceID) : IBroadcastNetwork(devices, srcDeviceID, 4) { } void EightGPUBroadcaster1::makeConnections() { _src->addNext(*_peers[7]); _peers[7]->addNext(*_peers[0]); _peers[7]->addNext(*_peers[1]); _peers[7]->addNext(*_peers[3]); _peers[7]->addNext(*_peers[4]); _peers[1]->addNext(*_peers[2]); _peers[3]->addNext(*_peers[5]); _peers[4]->addNext(*_peers[6]); } /* ========================= * TwoPeeringGPUsBroadcaster * ========================= */ 
TwoPeeringGPUsBroadcaster::TwoPeeringGPUsBroadcaster(std::set<int>& devices, int srcDeviceID) : ISafeBroadcastNetwork(devices, srcDeviceID, 0) { _tgtDeviceID = *devices.begin() == srcDeviceID ? *(++devices.begin()) : *devices.begin(); } TwoPeeringGPUsBroadcaster::~TwoPeeringGPUsBroadcaster() { if (_constructed) { checkCudaErrors(hipStreamDestroy(_tgtStream)); } } void TwoPeeringGPUsBroadcaster::makeConnections() { } void TwoPeeringGPUsBroadcaster::resetDeviceID(int d) { if (d >= 0) { NVMatrix::setDeviceID(d); } } ISafeBroadcastNetwork& TwoPeeringGPUsBroadcaster::construct() { assert(!_constructed); int d = NVMatrix::getDeviceID(); NVMatrix::setDeviceID(_tgtDeviceID); checkCudaErrors(hipStreamCreateWithFlags(&_tgtStream, hipStreamNonBlocking)); resetDeviceID(d); _constructed = true; return *this; } void TwoPeeringGPUsBroadcaster::_broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { int d = NVMatrix::getDeviceID(); NVMatrix::setDeviceID(_tgtDeviceID); mats[_tgtDeviceID]->add(*mats[_srcDeviceID], scaleTargets, scaleSource, *mats[_tgtDeviceID], _tgtStream); NVMatrix::syncStream(_tgtStream); resetDeviceID(d); }
copypipeline.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/copypipeline.cuh" //#include "gpu_util.cuh" using namespace std; /* ========================= * ICopySegment * ========================= */ ICopySegment::ICopySegment(IBroadcastNetwork& parent, int deviceID, Queue<int>* finishQueue) : _parent(&parent), _prev(NULL), _stream(NULL), _deviceID(deviceID), _finishQueue(finishQueue), Thread(true, getDeviceCPUs(parent.getSourceDeviceID())) { _execDeviceID = _deviceID; } ICopySegment::~ICopySegment() { if (_stream != NULL) { checkCudaErrors(cudaStreamDestroy(_stream)); } } void* ICopySegment::run() { assert(_execDeviceID != DEVICE_HOST); NVMatrix::setDeviceID(_execDeviceID); checkCudaErrors(cudaStreamCreateWithFlags(&_stream, cudaStreamNonBlocking)); bool exit = false; while (!exit) { CopyMessage& msg = *_queue.dequeue(); if (msg.getType() == CopyMessage::EXIT) { exit = true; } else { bool term = processMessage(msg); if (term) { assert(_finishQueue != NULL); _finishQueue->enqueue(1); } } delete &msg; } return NULL; } NVMatrix& ICopySegment::getChunk(NVMatrix& mat, int chunkSize, int chunkIdx) { NVMatrix& line = mat.reshaped(1, mat.getNumElements()); int start = chunkIdx * chunkSize; int end = min((chunkIdx+1) * chunkSize, mat.getNumElements()); NVMatrix& chunk = line.sliceCols(start, end); delete &line; return chunk; } inline NVMatrix& ICopySegment::getMatrix(CopyMessage& msg) { if (getDeviceID() == DEVICE_HOST) { return _hmat; } return msg.getMatrix(getDeviceID()); } Queue<CopyMessage*>& ICopySegment::getQueue() { return _queue; } inline int ICopySegment::getDeviceID() { return _deviceID; } void ICopySegment::addPrev(ICopySegment& c) { _prev = &c; if (_deviceID == DEVICE_HOST) { _execDeviceID = c.getDeviceID(); } } void ICopySegment::addNext(CopyPeer& c) { _next.push_back(&c); c.addPrev(*this); } bool ICopySegment::isTerminal() const { return _next.size() == 0; } /* ========================= * CopySource * ========================= */ CopySource::CopySource(IBroadcastNetwork& parent, int deviceID) : ICopySegment(parent, deviceID, NULL) { } bool CopySource::processMessage(CopyMessage& msg) { assert(msg.getType() == CopyMessage::COPY_START); int numChunks = min(getMatrix(msg).getNumElements(), max(COPY_MIN_CHUNKS, min(COPY_MAX_CHUNKS, DIVUP(getMatrix(msg).getNumElements(), COPY_MIN_CHUNK_SIZE)))); int chunkSize = DIVUP(getMatrix(msg).getNumElements(), numChunks); // printf("num chunks: %d\n", numChunks); for (int c = 0; c <= numChunks; ++c) { for (vector<CopyPeer*>::const_iterator it = _next.begin(); it != _next.end(); ++it) { (*it)->getQueue().enqueue(new CopyChunkMessage(c, chunkSize, numChunks, msg.getScaleSource(), msg.getScaleTargets(), msg.getMatrices())); } } return false; } inline bool CopySource::isSource() const { return true; } /* ========================= * CopyPeer * ========================= */ CopyPeer::CopyPeer(IBroadcastNetwork& parent, int deviceID, Queue<int>* finishQueue) : ICopySegment(parent, deviceID, finishQueue) { } 
bool CopyPeer::processMessage(CopyMessage& msg) { assert(msg.getType() == CopyMessage::COPY_CHUNK); CopyChunkMessage& cmsg = *static_cast<CopyChunkMessage*>(&msg); if (cmsg.getChunkIdx() < cmsg.getNumChunks()) { if (!isTerminal() || (isTerminal() && msg.getScaleTargets() == 0)) { getMatrix(msg).resize(_prev->getMatrix(msg)); } // getMatrix(msg).printShape("getMatrix(msg)"); // _prev->getMatrix(msg).printShape("_prev->getMatrix(msg)"); assert(getMatrix(msg).isSameDims(_prev->getMatrix(msg))); const float scaleSelf = isTerminal() ? msg.getScaleTargets() : 0; const float scalePrev = _prev->isSource() ? msg.getScaleSource() : 1; NVMatrix& prevChunk = getChunk(_prev->getMatrix(msg), cmsg.getChunkSize(), cmsg.getChunkIdx()); NVMatrix& myChunk = getChunk(getMatrix(msg), cmsg.getChunkSize(), cmsg.getChunkIdx()); prevChunk.add(myChunk, scalePrev, scaleSelf, myChunk, _stream); NVMatrix::syncStream(_stream); delete &prevChunk; delete &myChunk; } for (vector<CopyPeer*>::const_iterator it = _next.begin(); it != _next.end(); ++it) { (*it)->getQueue().enqueue(new CopyChunkMessage(cmsg)); } return cmsg.getChunkIdx() >= cmsg.getNumChunks() && isTerminal(); } inline bool CopyPeer::isSource() const { return false; } /* ========================= * IBroadcastNetwork * ========================= */ IBroadcastNetwork& IBroadcastNetwork::make(set<int> devices, int srcDevice) { if (devices.size() == 8) { return (new EightGPUBroadcaster1(devices, srcDevice))->construct(); } else if (devices.size() == 1) { return (new NullBroadcaster(devices, srcDevice))->construct(); } else if (devices.size() == 2 && NVMatrix::canAccessPeer(*devices.begin(), *(++devices.begin()))) { return (new TwoPeeringGPUsBroadcaster(devices, srcDevice))->construct(); } return (new NaiveBroadcaster(devices, srcDevice))->construct(); } IBroadcastNetwork::IBroadcastNetwork(set<int>& devices, int srcDeviceID, int numTerminal) : _devices(devices), _srcDeviceID(srcDeviceID), _numTerminal(numTerminal), _constructed(false), _src(NULL) { } IBroadcastNetwork::~IBroadcastNetwork() { vector<ICopySegment*> v; v.insert(v.end(), _peers.begin(), _peers.end()); v.insert(v.end(), _src); for (vector<ICopySegment*>::const_iterator it = v.begin(); it != v.end(); ++it) { if ((*it) != NULL) { (*it)->getQueue().enqueue(new CopyMessage(CopyMessage::EXIT)); (*it)->join(); delete *it; } } } IBroadcastNetwork& IBroadcastNetwork::construct() { assert(!_constructed); pair<vector<int>,vector<int> > gpus = makeGPULists(); _src = new CopySource(*this, _srcDeviceID); makePeers(gpus); makeConnections(); _src->start(); for (vector<CopyPeer*>::const_iterator it = _peers.begin(); it != _peers.end(); ++it) { (*it)->start(); } _constructed = true; return *this; } pair<vector<int>,vector<int> > IBroadcastNetwork::makeGPULists() { vector<int> same, other; for (set<int>::const_iterator it = _devices.begin(); it != _devices.end(); ++it) { if (*it != _srcDeviceID) { if (NVMatrix::canAccessPeer(_srcDeviceID, *it)) { same.insert(same.begin() + rand() % (1 + same.size()), *it); } else { other.insert(other.begin() + rand() % (1 + other.size()), *it); } } } return pair<vector<int>,vector<int> >(same, other); } void IBroadcastNetwork::broadcast(std::map<int, NVMatrix*>& mats) { _broadcast(mats, 1, 0); } void IBroadcastNetwork::_broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { assert(_constructed); assert(_finishQueue.getNumElements() == 0); assert(mats.size() == _devices.size()); assert(mats.size() > 1); if (mats[_srcDeviceID]->getNumElements() == 0) { for 
(map<int,NVMatrix*>::const_iterator it = mats.begin(); it != mats.end(); ++it) { it->second->resize(*mats[_srcDeviceID]); } } else { _src->getQueue().enqueue(new CopyStartMessage(scaleSource, scaleTargets, mats)); for (int i = 0; i < _numTerminal; ++i) { _finishQueue.dequeue(); } } assert(_finishQueue.getNumElements() == 0); } int IBroadcastNetwork::getSourceDeviceID() const { return _srcDeviceID; } void IBroadcastNetwork::makePeers(pair<vector<int>,vector<int> >& gpus) { vector<int>& same = gpus.first, &other = gpus.second; for (int i = 0; i < same.size(); ++i) { _peers.push_back(new CopyPeer(*this, same[i], &_finishQueue)); } for (int i = 0; i < other.size(); ++i) { _peers.push_back(new CopyPeer(*this, other[i], &_finishQueue)); } _peers.push_back(new CopyPeer(*this, DEVICE_HOST, &_finishQueue)); // peers[7] } /* ========================= * ISafeBroadcastNetwork * ========================= */ ISafeBroadcastNetwork& ISafeBroadcastNetwork::make(set<int> devices, int srcDevice) { if (devices.size() == 1) { return (new NullBroadcaster(devices, srcDevice))->construct(); } else if (devices.size() == 2 && NVMatrix::canAccessPeer(*devices.begin(), *(++devices.begin()))) { return (new TwoPeeringGPUsBroadcaster(devices, srcDevice))->construct(); } return (new NaiveBroadcaster(devices, srcDevice))->construct(); } ISafeBroadcastNetwork::ISafeBroadcastNetwork(std::set<int>& devices, int srcDeviceID, int numTerminal) : IBroadcastNetwork(devices, srcDeviceID, numTerminal) { } void ISafeBroadcastNetwork::broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { _broadcast(mats, scaleSource, scaleTargets); } ISafeBroadcastNetwork& ISafeBroadcastNetwork::construct() { IBroadcastNetwork::construct(); return *this; } /* ========================= * NullBroadcaster * ========================= */ NullBroadcaster::NullBroadcaster(std::set<int>& devices, int srcDeviceID) : ISafeBroadcastNetwork(devices, srcDeviceID, 0) { } void NullBroadcaster::makeConnections() { } NullBroadcaster& NullBroadcaster::construct() { _constructed = true; return *this; } void NullBroadcaster::broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { } void NullBroadcaster::broadcast(std::map<int, NVMatrix*>& mats) { } /* ========================= * NaiveBroadcaster * ========================= * * This one does src -> host -> all */ NaiveBroadcaster::NaiveBroadcaster(std::set<int>& devices, int srcDeviceID) : ISafeBroadcastNetwork(devices, srcDeviceID, devices.size()-1) { } void NaiveBroadcaster::makeConnections() { _src->addNext(*_peers.back()); // Make connection src -> host for (int i = 0; i < _peers.size() - 1; ++i) { if (_peers[i]->getDeviceID() != _src->getDeviceID()) { _peers.back()->addNext(*_peers[i]); // Make connection host -> peer } } } /* ========================= * EightGPUBroadcaster1 * ========================= * * This one does a fancy graph */ EightGPUBroadcaster1::EightGPUBroadcaster1(set<int>& devices, int srcDeviceID) : IBroadcastNetwork(devices, srcDeviceID, 4) { } void EightGPUBroadcaster1::makeConnections() { _src->addNext(*_peers[7]); _peers[7]->addNext(*_peers[0]); _peers[7]->addNext(*_peers[1]); _peers[7]->addNext(*_peers[3]); _peers[7]->addNext(*_peers[4]); _peers[1]->addNext(*_peers[2]); _peers[3]->addNext(*_peers[5]); _peers[4]->addNext(*_peers[6]); } /* ========================= * TwoPeeringGPUsBroadcaster * ========================= */ TwoPeeringGPUsBroadcaster::TwoPeeringGPUsBroadcaster(std::set<int>& devices, int srcDeviceID) : 
ISafeBroadcastNetwork(devices, srcDeviceID, 0) { _tgtDeviceID = *devices.begin() == srcDeviceID ? *(++devices.begin()) : *devices.begin(); } TwoPeeringGPUsBroadcaster::~TwoPeeringGPUsBroadcaster() { if (_constructed) { checkCudaErrors(cudaStreamDestroy(_tgtStream)); } } void TwoPeeringGPUsBroadcaster::makeConnections() { } void TwoPeeringGPUsBroadcaster::resetDeviceID(int d) { if (d >= 0) { NVMatrix::setDeviceID(d); } } ISafeBroadcastNetwork& TwoPeeringGPUsBroadcaster::construct() { assert(!_constructed); int d = NVMatrix::getDeviceID(); NVMatrix::setDeviceID(_tgtDeviceID); checkCudaErrors(cudaStreamCreateWithFlags(&_tgtStream, cudaStreamNonBlocking)); resetDeviceID(d); _constructed = true; return *this; } void TwoPeeringGPUsBroadcaster::_broadcast(std::map<int, NVMatrix*>& mats, float scaleSource, float scaleTargets) { int d = NVMatrix::getDeviceID(); NVMatrix::setDeviceID(_tgtDeviceID); mats[_tgtDeviceID]->add(*mats[_srcDeviceID], scaleTargets, scaleSource, *mats[_tgtDeviceID], _tgtStream); NVMatrix::syncStream(_tgtStream); resetDeviceID(d); }
a66777c685b7938fccaca406e028855e86bdf7dd.hip
// !!! This is a file automatically generated by hipify!!! // Author : B. Charlier (2017) #define UseCudaOnDoubles USE_DOUBLE_PRECISION #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <mex.h> #include "sinkhornGpuUtils.cx" template< typename TYPE > int SinkhornGpuStep(TYPE epsilon,TYPE lambda, TYPE rho, TYPE weightGeom,TYPE weightGrass, TYPE* alpha_h, TYPE* x_h, TYPE* y_h, TYPE* beta_h, TYPE* mu_h, TYPE* nu_h, TYPE* gammax_h, TYPE* gammay_h, TYPE* gammaWd, int dimPoint, int dimVect, int nx, int ny, int max_iter) { // Data on the device. TYPE* alpha_d; TYPE* x_d; TYPE* y_d; TYPE* beta_d; TYPE* mu_d; TYPE* nu_d; TYPE* gammax_d; TYPE* gammay_d; // Allocate arrays on device. hipMalloc((void**)&alpha_d, sizeof(TYPE)*(nx*dimVect)); hipMalloc((void**)&x_d, sizeof(TYPE)*(nx*dimPoint)); hipMalloc((void**)&y_d, sizeof(TYPE)*(ny*dimPoint)); hipMalloc((void**)&beta_d, sizeof(TYPE)*(ny*dimVect)); hipMalloc((void**)&mu_d, sizeof(TYPE)*(nx)); hipMalloc((void**)&nu_d, sizeof(TYPE)*(ny)); hipMalloc((void**)&gammax_d, sizeof(TYPE)*(nx*dimVect)); hipMalloc((void**)&gammay_d, sizeof(TYPE)*(ny*dimVect)); // Send data from host to device. hipMemcpy(x_d, x_h, sizeof(TYPE)*(nx*dimPoint), hipMemcpyHostToDevice); hipMemcpy(y_d, y_h, sizeof(TYPE)*(ny*dimPoint), hipMemcpyHostToDevice); hipMemcpy(mu_d, mu_h, sizeof(TYPE)*(nx), hipMemcpyHostToDevice); hipMemcpy(nu_d, nu_h, sizeof(TYPE)*(ny), hipMemcpyHostToDevice); hipMemcpy(alpha_d, alpha_h, sizeof(TYPE)*(nx*dimVect), hipMemcpyHostToDevice); hipMemcpy(beta_d, beta_h, sizeof(TYPE)*(ny*dimVect), hipMemcpyHostToDevice); // compute on device. dim3 blockSizex; blockSizex.x = CUDA_BLOCK_SIZE; // number of threads in each block dim3 gridSizex; gridSizex.x = nx / blockSizex.x + (nx%blockSizex.x==0 ? 0 : 1); dim3 blockSizey; blockSizey.x = CUDA_BLOCK_SIZE; // number of threads in each block dim3 gridSizey; gridSizey.x = ny / blockSizey.x + (ny%blockSizey.x==0 ? 
0 : 1); for (int iter = 0; iter<max_iter; iter++) { if(dimPoint==2 && dimVect==1) hipLaunchKernelGGL(( SinkhornGpuGradConvOnDevice<TYPE,2,1>), dim3(gridSizex),dim3(blockSizex),blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,lambda,weightGeom,weightGrass, alpha_d, x_d, y_d, beta_d, mu_d, gammax_d, nx, ny); else if(dimPoint==4 && dimVect==1) hipLaunchKernelGGL(( SinkhornGpuGradConvOnDevice<TYPE,4,1>), dim3(gridSizex),dim3(blockSizex),blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,lambda,weightGeom,weightGrass, alpha_d, x_d, y_d, beta_d, mu_d, gammax_d, nx, ny); else if(dimPoint==6 && dimVect==1) hipLaunchKernelGGL(( SinkhornGpuGradConvOnDevice<TYPE,6,1>), dim3(gridSizex),dim3(blockSizex),blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,lambda,weightGeom,weightGrass, alpha_d, x_d, y_d, beta_d, mu_d, gammax_d, nx, ny); else { printf("error: dimensions of Sinkhorn kernel not implemented in cuda"); hipFree(alpha_d); hipFree(x_d); hipFree(y_d); hipFree(beta_d); hipFree(mu_d); hipFree(nu_d); hipFree(gammax_d); hipFree(gammay_d); return(-1); } // update u hipMemcpy(alpha_d,gammax_d, sizeof(TYPE)*(nx*dimVect), hipMemcpyDeviceToDevice); if(dimPoint==2 && dimVect==1) hipLaunchKernelGGL(( SinkhornGpuGradConvOnDevice<TYPE,2,1>), dim3(gridSizey),dim3(blockSizey),blockSizey.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,lambda,weightGeom,weightGrass, beta_d, y_d, x_d, alpha_d, nu_d, gammay_d, ny, nx); else if(dimPoint==4 && dimVect==1) hipLaunchKernelGGL(( SinkhornGpuGradConvOnDevice<TYPE,4,1>), dim3(gridSizey),dim3(blockSizey),blockSizey.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,lambda,weightGeom,weightGrass, beta_d, y_d, x_d, alpha_d, nu_d, gammay_d, ny, nx); else if(dimPoint==6 && dimVect==1) hipLaunchKernelGGL(( SinkhornGpuGradConvOnDevice<TYPE,6,1>), dim3(gridSizey),dim3(blockSizey),blockSizey.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,lambda,weightGeom,weightGrass, beta_d, y_d, x_d, alpha_d, nu_d, gammay_d, ny, nx); else { printf("error: dimensions of SinkhornGpuGradConvOnDevice kernel not implemented in cuda"); hipFree(alpha_d); hipFree(x_d); hipFree(y_d); hipFree(beta_d); hipFree(mu_d); hipFree(nu_d); hipFree(gammax_d); hipFree(gammay_d); return(-1); } // update v hipMemcpy(beta_d, gammay_d,sizeof(TYPE)*(ny*dimVect), hipMemcpyDeviceToDevice); } // block until the device has completed hipDeviceSynchronize(); // Send data from device to host. 
hipMemcpy(gammax_h,alpha_d, sizeof(TYPE)*(nx*dimVect),hipMemcpyDeviceToHost); hipMemcpy(gammay_h,beta_d, sizeof(TYPE)*(ny*dimVect),hipMemcpyDeviceToHost); /*-------------------------------------------------------------------------------*/ // Compute dual energy values TYPE KLdumu = 0.0f, KLdvnu = 0.0f, totalmass = 0.0f; for (int i=0;i<nx;i++) KLdumu += (exp(- gammax_h[i]/rho ) -1) * mu_h[i]; for (int j=0;j<ny;j++) KLdvnu += (exp(- gammay_h[j]/rho ) -1) * nu_h[j]; if(dimPoint==2 && dimVect==1) hipLaunchKernelGGL(( WdualOnDevice<TYPE,2,1>), dim3(gridSizex),dim3(blockSizex),blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,weightGeom,weightGrass,alpha_d, x_d, y_d, beta_d, gammax_d, nx, ny); else if(dimPoint==4 && dimVect==1) hipLaunchKernelGGL(( WdualOnDevice<TYPE,4,1>), dim3(gridSizex),dim3(blockSizex),blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,weightGeom,weightGrass,alpha_d, x_d, y_d, beta_d, gammax_d, nx, ny); else if(dimPoint==6 && dimVect==1) hipLaunchKernelGGL(( WdualOnDevice<TYPE,6,1>), dim3(gridSizex),dim3(blockSizex),blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE), 0, epsilon,weightGeom,weightGrass,alpha_d, x_d, y_d, beta_d, gammax_d, nx, ny); else { printf("error: dimensions of Wdual kernel not implemented in cuda"); hipFree(alpha_d); hipFree(x_d); hipFree(y_d); hipFree(beta_d); hipFree(mu_d); hipFree(nu_d); hipFree(gammax_d); hipFree(gammay_d); return(-1); } // block until the device has completed hipDeviceSynchronize(); // Send data from device to host. TYPE *Gammax_h = new TYPE[nx]; hipMemcpy(Gammax_h,gammax_d, sizeof(TYPE)*(nx*dimVect),hipMemcpyDeviceToHost); for (int i=0;i<nx;i++) { totalmass += Gammax_h[i]; /*printf("%g\n", Gammax_h[i]);*/ } // compute the energy *gammaWd = totalmass - rho * KLdvnu - rho * KLdumu ; /*-------------------------------------------------------------------------------*/ // Free memory. 
hipFree(alpha_d); hipFree(x_d); hipFree(y_d); hipFree(mu_d); hipFree(nu_d); hipFree(beta_d); hipFree(gammax_d); hipFree(gammay_d); return 0; } void ExitFcn(void) { hipDeviceReset(); } ////////////////////////////////////////////////////////////////// ///////////////// MEX ENTRY POINT //////////////////////////////// ////////////////////////////////////////////////////////////////// /* the gateway function */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) //plhs: double *gamma //prhs: double *alpha, double *x, double *y, double *beta, double epsilon { // register an exit function to prevent crash at matlab exit or recompiling mexAtExit(ExitFcn); /* check for proper number of arguments */ if(nrhs != 11) mexErrMsgTxt("11 inputs required."); if(nlhs < 3 | nlhs > 3) mexErrMsgTxt("Three outputs required."); ////////////////////////////////////////////////////////////// // Input arguments ////////////////////////////////////////////////////////////// int argu = -1; //------ argument: alpha---------------// argu++; /* create a pointer to the input vectors wts */ double *alpha = mxGetPr(prhs[argu]); /* get the dimensions of the input weights */ int dimvect = mxGetM(prhs[argu]); int nx = mxGetN(prhs[argu]); //ncols //----- argument: x--------------// argu++; /* create a pointer to the input vectors srcs */ double *x = mxGetPr(prhs[argu]); /* input sources */ int dimpoint = mxGetM(prhs[argu]); //mrows /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=nx ) mexErrMsgTxt("Input x must have same number of columns as alpha."); //----- argument: y--------------// argu++; /* create a pointer to the input vectors srcs */ double *y = mxGetPr(prhs[argu]); /* input sources */ int ny = mxGetN(prhs[argu]); //ncols /* check to make sure the second dimension is nx */ if( mxGetM(prhs[argu])!=dimpoint ) mexErrMsgTxt("Input y must have same number of rows as x."); //------ argument: beta---------------// argu++; /* create a pointer to the input vectors wts */ double *beta = mxGetPr(prhs[argu]); /* check to make sure the first dimension is dimvect */ if( mxGetM(prhs[argu])!=dimvect ) mexErrMsgTxt("Input beta must have same number of rows as alpha."); /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=ny ) mexErrMsgTxt("Input beta must have same number of columns as y."); //----- argument: epsilon-------------// argu++; /* check to make sure the input argument is a scalar */ if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) || mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input epsilon must be a scalar."); } /* get the scalar input epsilon */ double epsilon = mxGetScalar(prhs[argu]); if (epsilon <= 0.0) mexErrMsgTxt("Input epsilon must be a positive number."); //----- argument: lambda-------------// argu++; /* check to make sure the input argument is a scalar */ if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) || mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input lambda must be a scalar."); } /* get the scalar input lambda */ double lambda = mxGetScalar(prhs[argu]); if (lambda <= 0.0) mexErrMsgTxt("Input lambda must be a positive number."); //----- argument: rho-------------// argu++; /* check to make sure the input argument is a scalar */ if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) || mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input rho must be a scalar."); } /* get the scalar input epsilon */ double rho = mxGetScalar(prhs[argu]); if (rho <= 0.0) mexErrMsgTxt("Input rho must 
be a positive number."); //------ argument: mu---------------// argu++; /* create a pointer to the input vectors wts */ double *mu = mxGetPr(prhs[argu]); /* check to make sure the first dimension is dimvect */ if( mxGetM(prhs[argu])!=1 ) mexErrMsgTxt("Input mu must have one row."); /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=nx ) mexErrMsgTxt("Input mu must have same number of columns as x."); //------ argument: nu---------------// argu++; /* create a pointer to the input vectors wts */ double *nu = mxGetPr(prhs[argu]); /* check to make sure the first dimension is dimvect */ if( mxGetM(prhs[argu])!=1 ) mexErrMsgTxt("Input nu must have one row."); /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=ny ) mexErrMsgTxt("Input nu must have same number of columns as y."); //----- argument: weight -------------// argu++; /* check to make sure the input argument is a scalar */ if( mxGetN(prhs[argu])*mxGetM(prhs[argu])!=2 ) { mexErrMsgTxt("Input weight must be a 2 vector."); } /* Get the values*/ double *weightG = mxGetPr(prhs[argu]); //----- argument: max_iter -------------// argu++; /* check to make sure the input argument is a scalar */ if( mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input max_iter must be an integer."); } if (!mxIsInt32(prhs[argu])){ mexErrMsgTxt("Input max_iter must be an integer. Use int32() to cast."); } /* Get the values*/ int max_iter = (int)mxGetScalar(prhs[argu]); ////////////////////////////////////////////////////////////// // Output arguments ////////////////////////////////////////////////////////////// /* set the output pointer to the output result(vector) */ plhs[0] = mxCreateDoubleMatrix(dimvect,nx,mxREAL); /* create a C pointer to a copy of the output result(vector)*/ double *gammax = mxGetPr(plhs[0]); /* set the output pointer to the output result(vector) */ plhs[1] = mxCreateDoubleMatrix(dimvect,ny,mxREAL); /* create a C pointer to a copy of the output result(vector)*/ double *gammay = mxGetPr(plhs[1]); /* set the output pointer to the output result(vector) */ plhs[2] = mxCreateDoubleMatrix(1,1,mxREAL); /* create a C pointer to a copy of the output result(vector)*/ double *gammaWd = mxGetPr(plhs[2]); #if UseCudaOnDoubles SinkhornGpuStep<double>(epsilon,lambda,rho,weightG[0],weightG[1],alpha,x,y,beta,mu,nu,gammax,gammay,gammaWd,dimpoint,dimvect,nx,ny,max_iter); #else // convert to float float *alpha_f = new float[nx*dimvect]; float *x_f = new float[nx*dimpoint]; float *y_f = new float[ny*dimpoint]; float *beta_f = new float[ny*dimvect]; float *mu_f = new float[nx]; float *nu_f = new float[ny]; float *gammax_f = new float[nx]; float *gammay_f = new float[ny]; float gammaWd_f; for(int i=0; i<nx*dimvect; i++) alpha_f[i] = alpha[i]; for(int i=0; i<nx*dimpoint; i++) x_f[i] = x[i]; for(int i=0; i<ny*dimpoint; i++) y_f[i] = y[i]; for(int i=0; i<ny*dimvect; i++) beta_f[i] = beta[i]; for(int i=0; i<nx; i++) mu_f[i] = mu[i]; for(int i=0; i<ny; i++) nu_f[i] = nu[i]; // function calls; SinkhornGpuStep<float>(epsilon,lambda,rho,weightG[0],weightG[1],alpha_f,x_f,y_f,beta_f,mu_f,nu_f,gammax_f,gammay_f,&gammaWd_f,dimpoint,dimvect,nx,ny,max_iter); for(int i=0; i<nx; i++) gammax[i] = gammax_f[i]; for(int i=0; i<nx; i++) gammay[i] = gammay_f[i]; *gammaWd = double(gammaWd_f); delete [] alpha_f; delete [] x_f; delete [] y_f; delete [] beta_f; delete [] mu_f; delete [] nu_f; delete [] gammax_f; delete [] gammay_f; #endif return; }
a66777c685b7938fccaca406e028855e86bdf7dd.cu
// Author : B. Charlier (2017) #define UseCudaOnDoubles USE_DOUBLE_PRECISION #include <stdio.h> #include <assert.h> #include <cuda.h> #include <mex.h> #include "sinkhornGpuUtils.cx" template< typename TYPE > int SinkhornGpuStep(TYPE epsilon,TYPE lambda, TYPE rho, TYPE weightGeom,TYPE weightGrass, TYPE* alpha_h, TYPE* x_h, TYPE* y_h, TYPE* beta_h, TYPE* mu_h, TYPE* nu_h, TYPE* gammax_h, TYPE* gammay_h, TYPE* gammaWd, int dimPoint, int dimVect, int nx, int ny, int max_iter) { // Data on the device. TYPE* alpha_d; TYPE* x_d; TYPE* y_d; TYPE* beta_d; TYPE* mu_d; TYPE* nu_d; TYPE* gammax_d; TYPE* gammay_d; // Allocate arrays on device. cudaMalloc((void**)&alpha_d, sizeof(TYPE)*(nx*dimVect)); cudaMalloc((void**)&x_d, sizeof(TYPE)*(nx*dimPoint)); cudaMalloc((void**)&y_d, sizeof(TYPE)*(ny*dimPoint)); cudaMalloc((void**)&beta_d, sizeof(TYPE)*(ny*dimVect)); cudaMalloc((void**)&mu_d, sizeof(TYPE)*(nx)); cudaMalloc((void**)&nu_d, sizeof(TYPE)*(ny)); cudaMalloc((void**)&gammax_d, sizeof(TYPE)*(nx*dimVect)); cudaMalloc((void**)&gammay_d, sizeof(TYPE)*(ny*dimVect)); // Send data from host to device. cudaMemcpy(x_d, x_h, sizeof(TYPE)*(nx*dimPoint), cudaMemcpyHostToDevice); cudaMemcpy(y_d, y_h, sizeof(TYPE)*(ny*dimPoint), cudaMemcpyHostToDevice); cudaMemcpy(mu_d, mu_h, sizeof(TYPE)*(nx), cudaMemcpyHostToDevice); cudaMemcpy(nu_d, nu_h, sizeof(TYPE)*(ny), cudaMemcpyHostToDevice); cudaMemcpy(alpha_d, alpha_h, sizeof(TYPE)*(nx*dimVect), cudaMemcpyHostToDevice); cudaMemcpy(beta_d, beta_h, sizeof(TYPE)*(ny*dimVect), cudaMemcpyHostToDevice); // compute on device. dim3 blockSizex; blockSizex.x = CUDA_BLOCK_SIZE; // number of threads in each block dim3 gridSizex; gridSizex.x = nx / blockSizex.x + (nx%blockSizex.x==0 ? 0 : 1); dim3 blockSizey; blockSizey.x = CUDA_BLOCK_SIZE; // number of threads in each block dim3 gridSizey; gridSizey.x = ny / blockSizey.x + (ny%blockSizey.x==0 ? 
0 : 1); for (int iter = 0; iter<max_iter; iter++) { if(dimPoint==2 && dimVect==1) SinkhornGpuGradConvOnDevice<TYPE,2,1><<<gridSizex,blockSizex,blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,lambda,weightGeom,weightGrass, alpha_d, x_d, y_d, beta_d, mu_d, gammax_d, nx, ny); else if(dimPoint==4 && dimVect==1) SinkhornGpuGradConvOnDevice<TYPE,4,1><<<gridSizex,blockSizex,blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,lambda,weightGeom,weightGrass, alpha_d, x_d, y_d, beta_d, mu_d, gammax_d, nx, ny); else if(dimPoint==6 && dimVect==1) SinkhornGpuGradConvOnDevice<TYPE,6,1><<<gridSizex,blockSizex,blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,lambda,weightGeom,weightGrass, alpha_d, x_d, y_d, beta_d, mu_d, gammax_d, nx, ny); else { printf("error: dimensions of Sinkhorn kernel not implemented in cuda"); cudaFree(alpha_d); cudaFree(x_d); cudaFree(y_d); cudaFree(beta_d); cudaFree(mu_d); cudaFree(nu_d); cudaFree(gammax_d); cudaFree(gammay_d); return(-1); } // update u cudaMemcpy(alpha_d,gammax_d, sizeof(TYPE)*(nx*dimVect), cudaMemcpyDeviceToDevice); if(dimPoint==2 && dimVect==1) SinkhornGpuGradConvOnDevice<TYPE,2,1><<<gridSizey,blockSizey,blockSizey.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,lambda,weightGeom,weightGrass, beta_d, y_d, x_d, alpha_d, nu_d, gammay_d, ny, nx); else if(dimPoint==4 && dimVect==1) SinkhornGpuGradConvOnDevice<TYPE,4,1><<<gridSizey,blockSizey,blockSizey.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,lambda,weightGeom,weightGrass, beta_d, y_d, x_d, alpha_d, nu_d, gammay_d, ny, nx); else if(dimPoint==6 && dimVect==1) SinkhornGpuGradConvOnDevice<TYPE,6,1><<<gridSizey,blockSizey,blockSizey.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,lambda,weightGeom,weightGrass, beta_d, y_d, x_d, alpha_d, nu_d, gammay_d, ny, nx); else { printf("error: dimensions of SinkhornGpuGradConvOnDevice kernel not implemented in cuda"); cudaFree(alpha_d); cudaFree(x_d); cudaFree(y_d); cudaFree(beta_d); cudaFree(mu_d); cudaFree(nu_d); cudaFree(gammax_d); cudaFree(gammay_d); return(-1); } // update v cudaMemcpy(beta_d, gammay_d,sizeof(TYPE)*(ny*dimVect), cudaMemcpyDeviceToDevice); } // block until the device has completed cudaThreadSynchronize(); // Send data from device to host. 
cudaMemcpy(gammax_h,alpha_d, sizeof(TYPE)*(nx*dimVect),cudaMemcpyDeviceToHost); cudaMemcpy(gammay_h,beta_d, sizeof(TYPE)*(ny*dimVect),cudaMemcpyDeviceToHost); /*-------------------------------------------------------------------------------*/ // Compute dual energy values TYPE KLdumu = 0.0f, KLdvnu = 0.0f, totalmass = 0.0f; for (int i=0;i<nx;i++) KLdumu += (exp(- gammax_h[i]/rho ) -1) * mu_h[i]; for (int j=0;j<ny;j++) KLdvnu += (exp(- gammay_h[j]/rho ) -1) * nu_h[j]; if(dimPoint==2 && dimVect==1) WdualOnDevice<TYPE,2,1><<<gridSizex,blockSizex,blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,weightGeom,weightGrass,alpha_d, x_d, y_d, beta_d, gammax_d, nx, ny); else if(dimPoint==4 && dimVect==1) WdualOnDevice<TYPE,4,1><<<gridSizex,blockSizex,blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,weightGeom,weightGrass,alpha_d, x_d, y_d, beta_d, gammax_d, nx, ny); else if(dimPoint==6 && dimVect==1) WdualOnDevice<TYPE,6,1><<<gridSizex,blockSizex,blockSizex.x*(dimPoint+dimVect)*sizeof(TYPE)>>> (epsilon,weightGeom,weightGrass,alpha_d, x_d, y_d, beta_d, gammax_d, nx, ny); else { printf("error: dimensions of Wdual kernel not implemented in cuda"); cudaFree(alpha_d); cudaFree(x_d); cudaFree(y_d); cudaFree(beta_d); cudaFree(mu_d); cudaFree(nu_d); cudaFree(gammax_d); cudaFree(gammay_d); return(-1); } // block until the device has completed cudaThreadSynchronize(); // Send data from device to host. TYPE *Gammax_h = new TYPE[nx]; cudaMemcpy(Gammax_h,gammax_d, sizeof(TYPE)*(nx*dimVect),cudaMemcpyDeviceToHost); for (int i=0;i<nx;i++) { totalmass += Gammax_h[i]; /*printf("%g\n", Gammax_h[i]);*/ } // compute the energy *gammaWd = totalmass - rho * KLdvnu - rho * KLdumu ; /*-------------------------------------------------------------------------------*/ // Free memory. 
cudaFree(alpha_d); cudaFree(x_d); cudaFree(y_d); cudaFree(mu_d); cudaFree(nu_d); cudaFree(beta_d); cudaFree(gammax_d); cudaFree(gammay_d); return 0; } void ExitFcn(void) { cudaDeviceReset(); } ////////////////////////////////////////////////////////////////// ///////////////// MEX ENTRY POINT //////////////////////////////// ////////////////////////////////////////////////////////////////// /* the gateway function */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) //plhs: double *gamma //prhs: double *alpha, double *x, double *y, double *beta, double epsilon { // register an exit function to prevent crash at matlab exit or recompiling mexAtExit(ExitFcn); /* check for proper number of arguments */ if(nrhs != 11) mexErrMsgTxt("11 inputs required."); if(nlhs < 3 | nlhs > 3) mexErrMsgTxt("Three outputs required."); ////////////////////////////////////////////////////////////// // Input arguments ////////////////////////////////////////////////////////////// int argu = -1; //------ argument: alpha---------------// argu++; /* create a pointer to the input vectors wts */ double *alpha = mxGetPr(prhs[argu]); /* get the dimensions of the input weights */ int dimvect = mxGetM(prhs[argu]); int nx = mxGetN(prhs[argu]); //ncols //----- argument: x--------------// argu++; /* create a pointer to the input vectors srcs */ double *x = mxGetPr(prhs[argu]); /* input sources */ int dimpoint = mxGetM(prhs[argu]); //mrows /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=nx ) mexErrMsgTxt("Input x must have same number of columns as alpha."); //----- argument: y--------------// argu++; /* create a pointer to the input vectors srcs */ double *y = mxGetPr(prhs[argu]); /* input sources */ int ny = mxGetN(prhs[argu]); //ncols /* check to make sure the second dimension is nx */ if( mxGetM(prhs[argu])!=dimpoint ) mexErrMsgTxt("Input y must have same number of rows as x."); //------ argument: beta---------------// argu++; /* create a pointer to the input vectors wts */ double *beta = mxGetPr(prhs[argu]); /* check to make sure the first dimension is dimvect */ if( mxGetM(prhs[argu])!=dimvect ) mexErrMsgTxt("Input beta must have same number of rows as alpha."); /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=ny ) mexErrMsgTxt("Input beta must have same number of columns as y."); //----- argument: epsilon-------------// argu++; /* check to make sure the input argument is a scalar */ if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) || mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input epsilon must be a scalar."); } /* get the scalar input epsilon */ double epsilon = mxGetScalar(prhs[argu]); if (epsilon <= 0.0) mexErrMsgTxt("Input epsilon must be a positive number."); //----- argument: lambda-------------// argu++; /* check to make sure the input argument is a scalar */ if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) || mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input lambda must be a scalar."); } /* get the scalar input lambda */ double lambda = mxGetScalar(prhs[argu]); if (lambda <= 0.0) mexErrMsgTxt("Input lambda must be a positive number."); //----- argument: rho-------------// argu++; /* check to make sure the input argument is a scalar */ if( !mxIsDouble(prhs[argu]) || mxIsComplex(prhs[argu]) || mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input rho must be a scalar."); } /* get the scalar input epsilon */ double rho = mxGetScalar(prhs[argu]); if (rho <= 0.0) mexErrMsgTxt("Input 
rho must be a positive number."); //------ argument: mu---------------// argu++; /* create a pointer to the input vectors wts */ double *mu = mxGetPr(prhs[argu]); /* check to make sure the first dimension is dimvect */ if( mxGetM(prhs[argu])!=1 ) mexErrMsgTxt("Input mu must have one row."); /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=nx ) mexErrMsgTxt("Input mu must have same number of columns as x."); //------ argument: nu---------------// argu++; /* create a pointer to the input vectors wts */ double *nu = mxGetPr(prhs[argu]); /* check to make sure the first dimension is dimvect */ if( mxGetM(prhs[argu])!=1 ) mexErrMsgTxt("Input nu must have one row."); /* check to make sure the second dimension is nx */ if( mxGetN(prhs[argu])!=ny ) mexErrMsgTxt("Input nu must have same number of columns as y."); //----- argument: weight -------------// argu++; /* check to make sure the input argument is a scalar */ if( mxGetN(prhs[argu])*mxGetM(prhs[argu])!=2 ) { mexErrMsgTxt("Input weight must be a 2 vector."); } /* Get the values*/ double *weightG = mxGetPr(prhs[argu]); //----- argument: max_iter -------------// argu++; /* check to make sure the input argument is a scalar */ if( mxGetN(prhs[argu])*mxGetM(prhs[argu])!=1 ) { mexErrMsgTxt("Input max_iter must be an integer."); } if (!mxIsInt32(prhs[argu])){ mexErrMsgTxt("Input max_iter must be an integer. Use int32() to cast."); } /* Get the values*/ int max_iter = (int)mxGetScalar(prhs[argu]); ////////////////////////////////////////////////////////////// // Output arguments ////////////////////////////////////////////////////////////// /* set the output pointer to the output result(vector) */ plhs[0] = mxCreateDoubleMatrix(dimvect,nx,mxREAL); /* create a C pointer to a copy of the output result(vector)*/ double *gammax = mxGetPr(plhs[0]); /* set the output pointer to the output result(vector) */ plhs[1] = mxCreateDoubleMatrix(dimvect,ny,mxREAL); /* create a C pointer to a copy of the output result(vector)*/ double *gammay = mxGetPr(plhs[1]); /* set the output pointer to the output result(vector) */ plhs[2] = mxCreateDoubleMatrix(1,1,mxREAL); /* create a C pointer to a copy of the output result(vector)*/ double *gammaWd = mxGetPr(plhs[2]); #if UseCudaOnDoubles SinkhornGpuStep<double>(epsilon,lambda,rho,weightG[0],weightG[1],alpha,x,y,beta,mu,nu,gammax,gammay,gammaWd,dimpoint,dimvect,nx,ny,max_iter); #else // convert to float float *alpha_f = new float[nx*dimvect]; float *x_f = new float[nx*dimpoint]; float *y_f = new float[ny*dimpoint]; float *beta_f = new float[ny*dimvect]; float *mu_f = new float[nx]; float *nu_f = new float[ny]; float *gammax_f = new float[nx]; float *gammay_f = new float[ny]; float gammaWd_f; for(int i=0; i<nx*dimvect; i++) alpha_f[i] = alpha[i]; for(int i=0; i<nx*dimpoint; i++) x_f[i] = x[i]; for(int i=0; i<ny*dimpoint; i++) y_f[i] = y[i]; for(int i=0; i<ny*dimvect; i++) beta_f[i] = beta[i]; for(int i=0; i<nx; i++) mu_f[i] = mu[i]; for(int i=0; i<ny; i++) nu_f[i] = nu[i]; // function calls; SinkhornGpuStep<float>(epsilon,lambda,rho,weightG[0],weightG[1],alpha_f,x_f,y_f,beta_f,mu_f,nu_f,gammax_f,gammay_f,&gammaWd_f,dimpoint,dimvect,nx,ny,max_iter); for(int i=0; i<nx; i++) gammax[i] = gammax_f[i]; for(int i=0; i<nx; i++) gammay[i] = gammay_f[i]; *gammaWd = double(gammaWd_f); delete [] alpha_f; delete [] x_f; delete [] y_f; delete [] beta_f; delete [] mu_f; delete [] nu_f; delete [] gammax_f; delete [] gammay_f; #endif return; }
7eecb3c94d893de462239fc6b445dba6f361165b.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <limits> #include <iostream> //GPU Add #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // for timing #include <chrono> #include <ctime> // user include #include "CLUEAlgoGPU.h" __global__ void kernel_compute_histogram( LayerTilesGPU *d_hist, const PointsPtr d_points, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < numberOfPoints) { // push index of points into tiles d_hist[d_points.layer[i]].fill(d_points.x[i], d_points.y[i], i); } } //kernel __global__ void kernel_compute_density( LayerTilesGPU *d_hist, PointsPtr d_points, float dc, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numberOfPoints){ double rhoi{0.}; int layeri = d_points.layer[i]; float xi = d_points.x[i]; float yi = d_points.y[i]; // get search box int4 search_box = d_hist[layeri].searchBox(xi-dc, xi+dc, yi-dc, yi+dc); // loop over bins in the search box for(int xBin = search_box.x; xBin < search_box.y+1; ++xBin) { for(int yBin = search_box.z; yBin < search_box.w+1; ++yBin) { // get the id of this bin int binId = d_hist[layeri].getGlobalBinByBin(xBin,yBin); // get the size of this bin int binSize = d_hist[layeri][binId].size(); // interate inside this bin for (int binIter = 0; binIter < binSize; binIter++) { int j = d_hist[layeri][binId][binIter]; // query N_{dc_}(i) float xj = d_points.x[j]; float yj = d_points.y[j]; float dist_ij = std::sqrt((xi-xj)*(xi-xj) + (yi-yj)*(yi-yj)); if(dist_ij <= dc) { // sum weights within N_{dc_}(i) rhoi += d_points.weight[j]; } } // end of interate inside this bin } } // end of loop over bins in search box d_points.rho[i] = rhoi; } } //kernel __global__ void kernel_compute_distanceToHigher(LayerTilesGPU* d_hist, PointsPtr d_points, float dm, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numberOfPoints){ int layeri = d_points.layer[i]; float deltai = std::numeric_limits<float>::max(); int nearestHigheri = -1; float xi = d_points.x[i]; float yi = d_points.y[i]; float rhoi = d_points.rho[i]; // get search box int4 search_box = d_hist[layeri].searchBox(xi-dm, xi+dm, yi-dm, yi+dm); // loop over all bins in the search box for(int xBin = search_box.x; xBin < search_box.y+1; ++xBin) { for(int yBin = search_box.z; yBin < search_box.w+1; ++yBin) { // get the id of this bin int binId = d_hist[layeri].getGlobalBinByBin(xBin,yBin); // get the size of this bin int binSize = d_hist[layeri][binId].size(); // interate inside this bin for (int binIter = 0; binIter < binSize; binIter++) { int j = d_hist[layeri][binId][binIter]; // query N'_{dm}(i) float xj = d_points.x[j]; float yj = d_points.y[j]; float dist_ij = std::sqrt((xi-xj)*(xi-xj) + (yi-yj)*(yi-yj)); bool foundHigher = (d_points.rho[j] > rhoi); // in the rare case where rho is the same, use detid foundHigher = foundHigher || ( (d_points.rho[j] == rhoi) && (j>i)); if(foundHigher && dist_ij <= dm) { // definition of N'_{dm}(i) // find the nearest point within N'_{dm}(i) if (dist_ij<deltai) { // update deltai and nearestHigheri deltai = dist_ij; nearestHigheri = j; } } } // end of interate inside this bin } } // end of loop over bins in search box d_points.delta[i] = deltai; d_points.nearestHigher[i] = nearestHigheri; } } //kernel __global__ void kernel_find_clusters( GPU::VecArray<int,maxNSeeds>* d_seeds, GPU::VecArray<int,maxNFollowers>* d_followers, PointsPtr d_points, float deltac, float deltao, float rhoc, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + 
threadIdx.x; if (i < numberOfPoints) { // initialize clusterIndex d_points.clusterIndex[i] = -1; // determine seed or outlier float deltai = d_points.delta[i]; float rhoi = d_points.rho[i]; bool isSeed = (deltai > deltac) && (rhoi >= rhoc); bool isOutlier = (deltai > deltao) && (rhoi < rhoc); if (isSeed) { // set isSeed as 1 d_points.isSeed[i] = 1; d_seeds[0].push_back(i); // head of d_seeds } else { if (!isOutlier) { assert(d_points.nearestHigher[i] < numberOfPoints); // register as follower at its nearest higher d_followers[d_points.nearestHigher[i]].push_back(i); } } } } //kernel __global__ void kernel_assign_clusters( const GPU::VecArray<int,maxNSeeds>* d_seeds, const GPU::VecArray<int,maxNFollowers>* d_followers, PointsPtr d_points, int numberOfPoints) { int idxCls = blockIdx.x * blockDim.x + threadIdx.x; const auto& seeds = d_seeds[0]; const auto nSeeds = seeds.size(); if (idxCls < nSeeds){ int localStack[localStackSizePerSeed] = {-1}; int localStackSize = 0; // asgine cluster to seed[idxCls] int idxThisSeed = seeds[idxCls]; d_points.clusterIndex[idxThisSeed] = idxCls; // push_back idThisSeed to localStack localStack[localStackSize] = idxThisSeed; localStackSize++; // process all elements in localStack while (localStackSize>0){ // get last element of localStack int idxEndOflocalStack = localStack[localStackSize-1]; int temp_clusterIndex = d_points.clusterIndex[idxEndOflocalStack]; // pop_back last element of localStack localStack[localStackSize-1] = -1; localStackSize--; // loop over followers of last element of localStack for( int j : d_followers[idxEndOflocalStack]){ // // pass id to follower d_points.clusterIndex[j] = temp_clusterIndex; // push_back follower to localStack localStack[localStackSize] = j; localStackSize++; } } } } //kernel void CLUEAlgoGPU::makeClusters( ) { copy_todevice(); clear_set(); //////////////////////////////////////////// // calcualte rho, delta and find seeds // 1 point per thread //////////////////////////////////////////// const dim3 blockSize(1024,1,1); const dim3 gridSize(ceil(points_.n/1024.0),1,1); hipLaunchKernelGGL(( kernel_compute_histogram) , dim3(gridSize),dim3(blockSize), 0, 0, d_hist, d_points, points_.n); hipLaunchKernelGGL(( kernel_compute_density) , dim3(gridSize),dim3(blockSize), 0, 0, d_hist, d_points, dc_, points_.n); hipLaunchKernelGGL(( kernel_compute_distanceToHigher) , dim3(gridSize),dim3(blockSize), 0, 0, d_hist, d_points, dm_, points_.n); hipLaunchKernelGGL(( kernel_find_clusters) , dim3(gridSize),dim3(blockSize), 0, 0, d_seeds, d_followers, d_points, deltac_,deltao_,rhoc_, points_.n); //////////////////////////////////////////// // assign clusters // 1 point per seeds //////////////////////////////////////////// const dim3 gridSize_nseeds(ceil(maxNSeeds/1024.0),1,1); hipLaunchKernelGGL(( kernel_assign_clusters) , dim3(gridSize_nseeds),dim3(blockSize), 0, 0, d_seeds, d_followers, d_points, points_.n); copy_tohost(); }
7eecb3c94d893de462239fc6b445dba6f361165b.cu
#include <math.h> #include <limits> #include <iostream> //GPU Add #include <cuda_runtime.h> #include <cuda.h> // for timing #include <chrono> #include <ctime> // user include #include "CLUEAlgoGPU.h" __global__ void kernel_compute_histogram( LayerTilesGPU *d_hist, const PointsPtr d_points, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < numberOfPoints) { // push index of points into tiles d_hist[d_points.layer[i]].fill(d_points.x[i], d_points.y[i], i); } } //kernel __global__ void kernel_compute_density( LayerTilesGPU *d_hist, PointsPtr d_points, float dc, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numberOfPoints){ double rhoi{0.}; int layeri = d_points.layer[i]; float xi = d_points.x[i]; float yi = d_points.y[i]; // get search box int4 search_box = d_hist[layeri].searchBox(xi-dc, xi+dc, yi-dc, yi+dc); // loop over bins in the search box for(int xBin = search_box.x; xBin < search_box.y+1; ++xBin) { for(int yBin = search_box.z; yBin < search_box.w+1; ++yBin) { // get the id of this bin int binId = d_hist[layeri].getGlobalBinByBin(xBin,yBin); // get the size of this bin int binSize = d_hist[layeri][binId].size(); // interate inside this bin for (int binIter = 0; binIter < binSize; binIter++) { int j = d_hist[layeri][binId][binIter]; // query N_{dc_}(i) float xj = d_points.x[j]; float yj = d_points.y[j]; float dist_ij = std::sqrt((xi-xj)*(xi-xj) + (yi-yj)*(yi-yj)); if(dist_ij <= dc) { // sum weights within N_{dc_}(i) rhoi += d_points.weight[j]; } } // end of interate inside this bin } } // end of loop over bins in search box d_points.rho[i] = rhoi; } } //kernel __global__ void kernel_compute_distanceToHigher(LayerTilesGPU* d_hist, PointsPtr d_points, float dm, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numberOfPoints){ int layeri = d_points.layer[i]; float deltai = std::numeric_limits<float>::max(); int nearestHigheri = -1; float xi = d_points.x[i]; float yi = d_points.y[i]; float rhoi = d_points.rho[i]; // get search box int4 search_box = d_hist[layeri].searchBox(xi-dm, xi+dm, yi-dm, yi+dm); // loop over all bins in the search box for(int xBin = search_box.x; xBin < search_box.y+1; ++xBin) { for(int yBin = search_box.z; yBin < search_box.w+1; ++yBin) { // get the id of this bin int binId = d_hist[layeri].getGlobalBinByBin(xBin,yBin); // get the size of this bin int binSize = d_hist[layeri][binId].size(); // interate inside this bin for (int binIter = 0; binIter < binSize; binIter++) { int j = d_hist[layeri][binId][binIter]; // query N'_{dm}(i) float xj = d_points.x[j]; float yj = d_points.y[j]; float dist_ij = std::sqrt((xi-xj)*(xi-xj) + (yi-yj)*(yi-yj)); bool foundHigher = (d_points.rho[j] > rhoi); // in the rare case where rho is the same, use detid foundHigher = foundHigher || ( (d_points.rho[j] == rhoi) && (j>i)); if(foundHigher && dist_ij <= dm) { // definition of N'_{dm}(i) // find the nearest point within N'_{dm}(i) if (dist_ij<deltai) { // update deltai and nearestHigheri deltai = dist_ij; nearestHigheri = j; } } } // end of interate inside this bin } } // end of loop over bins in search box d_points.delta[i] = deltai; d_points.nearestHigher[i] = nearestHigheri; } } //kernel __global__ void kernel_find_clusters( GPU::VecArray<int,maxNSeeds>* d_seeds, GPU::VecArray<int,maxNFollowers>* d_followers, PointsPtr d_points, float deltac, float deltao, float rhoc, int numberOfPoints ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numberOfPoints) { // initialize clusterIndex 
d_points.clusterIndex[i] = -1; // determine seed or outlier float deltai = d_points.delta[i]; float rhoi = d_points.rho[i]; bool isSeed = (deltai > deltac) && (rhoi >= rhoc); bool isOutlier = (deltai > deltao) && (rhoi < rhoc); if (isSeed) { // set isSeed as 1 d_points.isSeed[i] = 1; d_seeds[0].push_back(i); // head of d_seeds } else { if (!isOutlier) { assert(d_points.nearestHigher[i] < numberOfPoints); // register as follower at its nearest higher d_followers[d_points.nearestHigher[i]].push_back(i); } } } } //kernel __global__ void kernel_assign_clusters( const GPU::VecArray<int,maxNSeeds>* d_seeds, const GPU::VecArray<int,maxNFollowers>* d_followers, PointsPtr d_points, int numberOfPoints) { int idxCls = blockIdx.x * blockDim.x + threadIdx.x; const auto& seeds = d_seeds[0]; const auto nSeeds = seeds.size(); if (idxCls < nSeeds){ int localStack[localStackSizePerSeed] = {-1}; int localStackSize = 0; // asgine cluster to seed[idxCls] int idxThisSeed = seeds[idxCls]; d_points.clusterIndex[idxThisSeed] = idxCls; // push_back idThisSeed to localStack localStack[localStackSize] = idxThisSeed; localStackSize++; // process all elements in localStack while (localStackSize>0){ // get last element of localStack int idxEndOflocalStack = localStack[localStackSize-1]; int temp_clusterIndex = d_points.clusterIndex[idxEndOflocalStack]; // pop_back last element of localStack localStack[localStackSize-1] = -1; localStackSize--; // loop over followers of last element of localStack for( int j : d_followers[idxEndOflocalStack]){ // // pass id to follower d_points.clusterIndex[j] = temp_clusterIndex; // push_back follower to localStack localStack[localStackSize] = j; localStackSize++; } } } } //kernel void CLUEAlgoGPU::makeClusters( ) { copy_todevice(); clear_set(); //////////////////////////////////////////// // calcualte rho, delta and find seeds // 1 point per thread //////////////////////////////////////////// const dim3 blockSize(1024,1,1); const dim3 gridSize(ceil(points_.n/1024.0),1,1); kernel_compute_histogram <<<gridSize,blockSize>>>(d_hist, d_points, points_.n); kernel_compute_density <<<gridSize,blockSize>>>(d_hist, d_points, dc_, points_.n); kernel_compute_distanceToHigher <<<gridSize,blockSize>>>(d_hist, d_points, dm_, points_.n); kernel_find_clusters <<<gridSize,blockSize>>>(d_seeds, d_followers, d_points, deltac_,deltao_,rhoc_, points_.n); //////////////////////////////////////////// // assign clusters // 1 point per seeds //////////////////////////////////////////// const dim3 gridSize_nseeds(ceil(maxNSeeds/1024.0),1,1); kernel_assign_clusters <<<gridSize_nseeds,blockSize>>>(d_seeds, d_followers, d_points, points_.n); copy_tohost(); }
1dfa5c5045502dad53f006e955afd68dd28d5292.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "./slice_layer.hpp" #include "../util/math_functions.hpp" namespace caffe { __global__ void Slice(const int nthreads, const real_t* in_data, const bool forward, const int num_slices, const int slice_size, const int bottom_slice_axis, const int top_slice_axis, const int offset_slice_axis, real_t* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; const int bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; if (forward) { out_data[index] = in_data[bottom_index]; } else { out_data[bottom_index] = in_data[index]; } } } void SliceLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { if (top.size() == 1) { return; } int offset_slice_axis = 0; const real_t* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = true; for (int i = 0; i < top.size(); ++i) { real_t* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; Slice // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; } } } // namespace caffe
1dfa5c5045502dad53f006e955afd68dd28d5292.cu
#include <vector>

#include "./slice_layer.hpp"
#include "../util/math_functions.hpp"

namespace caffe {

__global__ void Slice(const int nthreads, const real_t* in_data,
    const bool forward, const int num_slices, const int slice_size,
    const int bottom_slice_axis, const int top_slice_axis,
    const int offset_slice_axis, real_t* out_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int total_slice_size = slice_size * top_slice_axis;
    const int slice_num = index / total_slice_size;
    const int slice_index = index % total_slice_size;
    const int bottom_index = slice_index +
        (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
    if (forward) {
      out_data[index] = in_data[bottom_index];
    } else {
      out_data[bottom_index] = in_data[index];
    }
  }
}

void SliceLayer::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  if (top.size() == 1) { return; }
  int offset_slice_axis = 0;
  const real_t* bottom_data = bottom[0]->gpu_data();
  const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
  const bool kForward = true;
  for (int i = 0; i < top.size(); ++i) {
    real_t* top_data = top[i]->mutable_gpu_data();
    const int top_slice_axis = top[i]->shape(slice_axis_);
    const int top_slice_size = top_slice_axis * slice_size_;
    const int nthreads = top_slice_size * num_slices_;
    Slice  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
        nthreads, bottom_data, kForward, num_slices_, slice_size_,
        bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
    offset_slice_axis += top_slice_axis;
  }
}

}  // namespace caffe
ace4accaf60ba08426d97865fbe7ccb6992eb270.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <string> #include <hip/hip_runtime.h> #include <stdio.h> #include <opencv/cv.h> #include <opencv/highgui.h> #include <iostream> #include <chrono> #include "GrayscaleBenchmarker.h" #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) void __cudaSafeCall(hipError_t err, const char *file, const int line) { #ifdef CUDA_ERROR_CHECK if (hipSuccess != err) { fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } #endif return; } void __cudaCheckError(const char *file, const int line) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } err = hipDeviceSynchronize(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } #endif return; } __global__ void rgb_2_grey(uchar* const greyImage, const uchar4* const rgbImage, int rows, int columns) { int rgb_x = blockIdx.x * blockDim.x + threadIdx.x; //x coordinate of pixel int rgb_y = blockIdx.y * blockDim.y + threadIdx.y; //y coordinate of pixel if ((rgb_x >= columns) || (rgb_y >= rows)) { return; } int rgb_ab = rgb_y*columns + rgb_x; //absolute pixel position uchar4 rgb_Img = rgbImage[rgb_ab]; greyImage[rgb_ab] = uchar((float(rgb_Img.x))*0.299f + (float(rgb_Img.y))*0.587f + (float(rgb_Img.z))*0.114f); } using namespace cv; using namespace std; void Proc_Img_Gpu(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage); std::chrono::high_resolution_clock::duration RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols); void Save_Img(); Mat img_RGB; Mat img_Grey; uchar4 *d_rgbImg; uchar *d_greyImg; std::chrono::high_resolution_clock::duration GrayscaleBenchmarker::runGpu() { uchar4* h_rgbImg; uchar* h_greyImg; Proc_Img_Gpu(&h_rgbImg, &h_greyImg, &d_rgbImg, &d_greyImg); auto time = RGB_2_Greyscale(d_greyImg, d_rgbImg, img_RGB.rows, img_RGB.cols); Save_Img(); return time; } std::chrono::high_resolution_clock::duration GrayscaleBenchmarker::runCpu() { Mat image = imread("./test.png"); Mat gray; auto start = std::chrono::high_resolution_clock::now(); cvtColor(image,gray,CV_BGR2GRAY); auto end = std::chrono::high_resolution_clock::now(); return end - start; } GrayscaleBenchmarker::GrayscaleBenchmarker(size_t size, size_t arraySize) : Benchmarker(size) { } void Proc_Img_Gpu(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage){ hipFree(0); CudaCheckError(); //loads image into a matrix object along with the colors in BGR format (must convert to rgb). Mat img = imread("./test.png", CV_LOAD_IMAGE_COLOR); if (img.empty()){ cerr << "couldnt open file" << endl; exit(1); } //converts color type from BGR to RGB cvtColor(img, img_RGB, CV_BGR2RGBA); //allocate memory for new greyscale image. //img.rows returns the range of pixels in y, img.cols returns range of pixels in x //CV_8UC1 means 8 bit unsigned(non-negative) single channel of color, aka greyscale. 
//all three of the parameters allow the create function in the Mat class to determine how much memory to allocate img_Grey.create(img.rows, img.cols, CV_8UC1); //creates rgb and greyscale image arrays *h_RGBImage = (uchar4*)img_RGB.ptr<uchar>(0); //.ptr is a method in the mat class that returns a pointer to the first element of the matrix. *h_greyImage = (uchar*)img_Grey.ptr<uchar>(0); //this is just like a regular array/pointer mem address to first element of the array. This is templated //in this case the compiler runs the function for returning pointer of type unsigned char. for rgb image it is //cast to uchar4 struct to hold r,g, and b values. const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //amount of pixels //allocate memory on gpu hipMalloc(d_RGBImage, sizeof(uchar4) * num_pix); //bites of 1 uchar4 times # of pixels gives number of bites necessary for array CudaCheckError(); hipMalloc(d_greyImage, sizeof(uchar) * num_pix);//bites of uchar times # pixels gives number of bites necessary for array CudaCheckError(); hipMemset(*d_greyImage, 0, sizeof(uchar) * num_pix); CudaCheckError(); //copy array into allocated space hipMemcpy(*d_RGBImage, *h_RGBImage, sizeof(uchar4)*num_pix, hipMemcpyHostToDevice); CudaCheckError(); d_rgbImg = *d_RGBImage; d_greyImg = *d_greyImage; } std::chrono::high_resolution_clock::duration RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols){ const int BS = 16; const dim3 blockSize(BS, BS); const dim3 gridSize((num_Cols / BS) + 1, (num_Rows / BS) + 1); hipDeviceSynchronize(); auto start = chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( rgb_2_grey) , dim3(gridSize), dim3(blockSize), 0, 0, d_greyImage, d_RGBImage, num_Rows, num_Cols); hipDeviceSynchronize(); auto end = chrono::high_resolution_clock::now(); hipDeviceSynchronize(); CudaCheckError(); return end - start; } void Save_Img(){ const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); hipMemcpy(img_Grey.ptr<uchar>(0), d_greyImg, sizeof(uchar)*num_pix, hipMemcpyDeviceToHost); CudaCheckError(); hipFree(d_rgbImg); hipFree(d_greyImg); }
ace4accaf60ba08426d97865fbe7ccb6992eb270.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <string> #include <cuda.h> #include <stdio.h> #include <opencv/cv.h> #include <opencv/highgui.h> #include <iostream> #include <chrono> #include "GrayscaleBenchmarker.h" #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) void __cudaSafeCall(cudaError err, const char *file, const int line) { #ifdef CUDA_ERROR_CHECK if (cudaSuccess != err) { fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } #endif return; } void __cudaCheckError(const char *file, const int line) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } err = cudaDeviceSynchronize(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } #endif return; } __global__ void rgb_2_grey(uchar* const greyImage, const uchar4* const rgbImage, int rows, int columns) { int rgb_x = blockIdx.x * blockDim.x + threadIdx.x; //x coordinate of pixel int rgb_y = blockIdx.y * blockDim.y + threadIdx.y; //y coordinate of pixel if ((rgb_x >= columns) || (rgb_y >= rows)) { return; } int rgb_ab = rgb_y*columns + rgb_x; //absolute pixel position uchar4 rgb_Img = rgbImage[rgb_ab]; greyImage[rgb_ab] = uchar((float(rgb_Img.x))*0.299f + (float(rgb_Img.y))*0.587f + (float(rgb_Img.z))*0.114f); } using namespace cv; using namespace std; void Proc_Img_Gpu(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage); std::chrono::high_resolution_clock::duration RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols); void Save_Img(); Mat img_RGB; Mat img_Grey; uchar4 *d_rgbImg; uchar *d_greyImg; std::chrono::high_resolution_clock::duration GrayscaleBenchmarker::runGpu() { uchar4* h_rgbImg; uchar* h_greyImg; Proc_Img_Gpu(&h_rgbImg, &h_greyImg, &d_rgbImg, &d_greyImg); auto time = RGB_2_Greyscale(d_greyImg, d_rgbImg, img_RGB.rows, img_RGB.cols); Save_Img(); return time; } std::chrono::high_resolution_clock::duration GrayscaleBenchmarker::runCpu() { Mat image = imread("./test.png"); Mat gray; auto start = std::chrono::high_resolution_clock::now(); cvtColor(image,gray,CV_BGR2GRAY); auto end = std::chrono::high_resolution_clock::now(); return end - start; } GrayscaleBenchmarker::GrayscaleBenchmarker(size_t size, size_t arraySize) : Benchmarker(size) { } void Proc_Img_Gpu(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage){ cudaFree(0); CudaCheckError(); //loads image into a matrix object along with the colors in BGR format (must convert to rgb). Mat img = imread("./test.png", CV_LOAD_IMAGE_COLOR); if (img.empty()){ cerr << "couldnt open file" << endl; exit(1); } //converts color type from BGR to RGB cvtColor(img, img_RGB, CV_BGR2RGBA); //allocate memory for new greyscale image. //img.rows returns the range of pixels in y, img.cols returns range of pixels in x //CV_8UC1 means 8 bit unsigned(non-negative) single channel of color, aka greyscale. 
//all three of the parameters allow the create function in the Mat class to determine how much memory to allocate img_Grey.create(img.rows, img.cols, CV_8UC1); //creates rgb and greyscale image arrays *h_RGBImage = (uchar4*)img_RGB.ptr<uchar>(0); //.ptr is a method in the mat class that returns a pointer to the first element of the matrix. *h_greyImage = (uchar*)img_Grey.ptr<uchar>(0); //this is just like a regular array/pointer mem address to first element of the array. This is templated //in this case the compiler runs the function for returning pointer of type unsigned char. for rgb image it is //cast to uchar4 struct to hold r,g, and b values. const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //amount of pixels //allocate memory on gpu cudaMalloc(d_RGBImage, sizeof(uchar4) * num_pix); //bites of 1 uchar4 times # of pixels gives number of bites necessary for array CudaCheckError(); cudaMalloc(d_greyImage, sizeof(uchar) * num_pix);//bites of uchar times # pixels gives number of bites necessary for array CudaCheckError(); cudaMemset(*d_greyImage, 0, sizeof(uchar) * num_pix); CudaCheckError(); //copy array into allocated space cudaMemcpy(*d_RGBImage, *h_RGBImage, sizeof(uchar4)*num_pix, cudaMemcpyHostToDevice); CudaCheckError(); d_rgbImg = *d_RGBImage; d_greyImg = *d_greyImage; } std::chrono::high_resolution_clock::duration RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols){ const int BS = 16; const dim3 blockSize(BS, BS); const dim3 gridSize((num_Cols / BS) + 1, (num_Rows / BS) + 1); cudaDeviceSynchronize(); auto start = chrono::high_resolution_clock::now(); rgb_2_grey <<<gridSize, blockSize>>>(d_greyImage, d_RGBImage, num_Rows, num_Cols); cudaDeviceSynchronize(); auto end = chrono::high_resolution_clock::now(); cudaDeviceSynchronize(); CudaCheckError(); return end - start; } void Save_Img(){ const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); cudaMemcpy(img_Grey.ptr<uchar>(0), d_greyImg, sizeof(uchar)*num_pix, cudaMemcpyDeviceToHost); CudaCheckError(); cudaFree(d_rgbImg); cudaFree(d_greyImg); }
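The rgb_2_grey kernel in the pair above uses the BT.601 luma weights (0.299, 0.587, 0.114). A small CPU reference of the same formula can be handy for spot-checking a few pixels after the GPU pass; this sketch is not part of the original benchmarker and the helper name luma_bt601 is made up.

// Hypothetical CPU reference for the BT.601 grayscale conversion used above.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint8_t luma_bt601(uint8_t r, uint8_t g, uint8_t b) {
  const float y = 0.299f * r + 0.587f * g + 0.114f * b;
  return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, y)));
}

int main() {
  // Pure red, green, and blue pixels map to roughly 76, 150, and 29.
  printf("%d %d %d\n", luma_bt601(255, 0, 0), luma_bt601(0, 255, 0), luma_bt601(0, 0, 255));
  return 0;
}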
860f1f6a46c1acf61d7469fe22987f1b4772a252.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <omp.h>
#include "common.h"

#define RADIUS 512
#define BLOCK_SIZE 512
#define NUM_ELEMENTS 1024000

__global__ void stencil_1d_device(int *in, int *out, int N)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);

    if(index < N)
    {
        // Apply the stencil
        int result = 0;
        for (int i = index ; i < index + 2*RADIUS + 1; i++)
            result += in[i];

        // Store the result
        out[index] = result;
    }
}

__global__ void stencil_1d_shared(int *in, int *out, int N)
{
    __shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;

    if(gindex < N + RADIUS)
    {
        int lindex = threadIdx.x + RADIUS;

        // Read input elements into shared memory
        temp[lindex] = in[gindex];
        if (threadIdx.x < RADIUS)
        {
            temp[lindex - RADIUS] = in[gindex - RADIUS];
            temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
        }

        // Make sure all threads get to this point before proceeding!
        __syncthreads();

        // Apply the stencil
        int result = 0;
        for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
            result += temp[lindex + offset];

        // Store the result
        out[gindex-RADIUS] = result;
    }
}

int main()
{
    unsigned int i;
    int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
    int *d_in, *d_out;

    // Initialize host data
    for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
        h_in[i] = 1; // With a value of 1, every output value should be 2*RADIUS + 1

    // Allocate space on the device
    cudaCheck( hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
    cudaCheck( hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );

    //Timing structures
    hipEvent_t start,stop;
    float elapsedTime;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Copy input data to device
    cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) );

    hipEventRecord(start,0);
    cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) );
    hipLaunchKernelGGL(( stencil_1d_shared), dim3((NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out, NUM_ELEMENTS);
    cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost) );
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime,start,stop);

    // Verify every out value is 2*RADIUS + 1
    for( i = 0; i < NUM_ELEMENTS; ++i )
    {
        if (h_out[i] != 2 * RADIUS + 1)
        {
            printf("Element h_out[%d] == %d != %d\n", i, h_out[i], 2*RADIUS+1);
            break;
        }
    }
    if (i == NUM_ELEMENTS)
        printf("SUCCESS GPU_SHARED in %f mseconds!\n", elapsedTime);

    hipEventRecord(start,0);
    cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) );
    hipLaunchKernelGGL(( stencil_1d_device), dim3((NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out, NUM_ELEMENTS);
    cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost) );
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime,start,stop);

    // Verify every out value is 2*RADIUS + 1
    for( i = 0; i < NUM_ELEMENTS; ++i )
    {
        if (h_out[i] != 2 * RADIUS + 1)
        {
            printf("Element h_out[%d] == %d != %d\n", i, h_out[i], 2*RADIUS+1);
            break;
        }
    }
    if (i == NUM_ELEMENTS)
        printf("SUCCESS GPU_DEVICE in %f mseconds!\n", elapsedTime);

    double startt = omp_get_wtime();
    #pragma omp parallel for
    for(int i = 0; i < NUM_ELEMENTS; i++)
    {
        int sum = 0;
        for(int j = i; j < i + 2*RADIUS + 1; j++)
        {
            sum += h_in[j];
        }
        h_out[i] = sum;
    }
    double endt = omp_get_wtime();

    for( i = 0; i < NUM_ELEMENTS; ++i )
    {
        if (h_out[i] != 2 * RADIUS + 1)
        {
            printf("Element h_out[%d] == %d != %d\n", i, h_out[i], 2*RADIUS+1);
            break;
        }
    }
    if (i == NUM_ELEMENTS)
        printf("SUCCESS CPU in %f mseconds!\n", 1000 * (endt - startt));

    // Free out memory
    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
860f1f6a46c1acf61d7469fe22987f1b4772a252.cu
#include <stdio.h>
#include <omp.h>
#include "common.h"

#define RADIUS 512
#define BLOCK_SIZE 512
#define NUM_ELEMENTS 1024000

__global__ void stencil_1d_device(int *in, int *out, int N)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);

    if(index < N)
    {
        // Apply the stencil
        int result = 0;
        for (int i = index ; i < index + 2*RADIUS + 1; i++)
            result += in[i];

        // Store the result
        out[index] = result;
    }
}

__global__ void stencil_1d_shared(int *in, int *out, int N)
{
    __shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;

    if(gindex < N + RADIUS)
    {
        int lindex = threadIdx.x + RADIUS;

        // Read input elements into shared memory
        temp[lindex] = in[gindex];
        if (threadIdx.x < RADIUS)
        {
            temp[lindex - RADIUS] = in[gindex - RADIUS];
            temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
        }

        // Make sure all threads get to this point before proceeding!
        __syncthreads();

        // Apply the stencil
        int result = 0;
        for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
            result += temp[lindex + offset];

        // Store the result
        out[gindex-RADIUS] = result;
    }
}

int main()
{
    unsigned int i;
    int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS];
    int *d_in, *d_out;

    // Initialize host data
    for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i )
        h_in[i] = 1; // With a value of 1, every output value should be 2*RADIUS + 1

    // Allocate space on the device
    cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) );
    cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );

    //Timing structures
    cudaEvent_t start,stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Copy input data to device
    cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );

    cudaEventRecord(start,0);
    cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );
    stencil_1d_shared<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out, NUM_ELEMENTS);
    cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);

    // Verify every out value is 2*RADIUS + 1
    for( i = 0; i < NUM_ELEMENTS; ++i )
    {
        if (h_out[i] != 2 * RADIUS + 1)
        {
            printf("Element h_out[%d] == %d != %d\n", i, h_out[i], 2*RADIUS+1);
            break;
        }
    }
    if (i == NUM_ELEMENTS)
        printf("SUCCESS GPU_SHARED in %f mseconds!\n", elapsedTime);

    cudaEventRecord(start,0);
    cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) );
    stencil_1d_device<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out, NUM_ELEMENTS);
    cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);

    // Verify every out value is 2*RADIUS + 1
    for( i = 0; i < NUM_ELEMENTS; ++i )
    {
        if (h_out[i] != 2 * RADIUS + 1)
        {
            printf("Element h_out[%d] == %d != %d\n", i, h_out[i], 2*RADIUS+1);
            break;
        }
    }
    if (i == NUM_ELEMENTS)
        printf("SUCCESS GPU_DEVICE in %f mseconds!\n", elapsedTime);

    double startt = omp_get_wtime();
    #pragma omp parallel for
    for(int i = 0; i < NUM_ELEMENTS; i++)
    {
        int sum = 0;
        for(int j = i; j < i + 2*RADIUS + 1; j++)
        {
            sum += h_in[j];
        }
        h_out[i] = sum;
    }
    double endt = omp_get_wtime();

    for( i = 0; i < NUM_ELEMENTS; ++i )
    {
        if (h_out[i] != 2 * RADIUS + 1)
        {
            printf("Element h_out[%d] == %d != %d\n", i, h_out[i], 2*RADIUS+1);
            break;
        }
    }
    if (i == NUM_ELEMENTS)
        printf("SUCCESS CPU in %f mseconds!\n", 1000 * (endt - startt));

    // Free out memory
    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
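In the stencil pair above, stencil_1d_shared stages BLOCK_SIZE interior elements plus a RADIUS-wide halo on each side in shared memory; because RADIUS equals BLOCK_SIZE here, every thread with threadIdx.x < RADIUS loads exactly one left-halo and one right-halo element. The sketch below only recomputes the tile and grid sizes implied by the constants above, as a quick sanity check; it adds nothing beyond what the kernels already encode.

// Back-of-the-envelope sizing for the shared-memory tile used by stencil_1d_shared.
#include <cstdio>

int main() {
  const int RADIUS = 512, BLOCK_SIZE = 512, NUM_ELEMENTS = 1024000;
  const int tile_elems  = BLOCK_SIZE + 2 * RADIUS;            // interior + two halos
  const long tile_bytes = (long)tile_elems * sizeof(int);     // 1536 * 4 = 6144 bytes per block
  const int grid_blocks = (NUM_ELEMENTS + BLOCK_SIZE - 1) / BLOCK_SIZE;
  printf("tile: %d ints (%ld bytes), grid: %d blocks of %d threads\n",
         tile_elems, tile_bytes, grid_blocks, BLOCK_SIZE);
  return 0;
}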
0f5f556715c346e4711c4fb60ac1257ea4df8c80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : histogram_equalization_CUDA.cu Author : francesco Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <stdlib.h> #include <string> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <sys/time.h> #include <time.h> using namespace std; using namespace cv; __global__ void make_histogram(unsigned char *image, int width, int height, int *histogram){ int idx = blockIdx.x * blockDim.x + threadIdx.x; long index; for(int i = idx; i < width * height; i += blockDim.x * gridDim.x){ index = i * 3; int R = image[index]; int G = image[index + 1]; int B = image[index + 2]; int Y = R * .299000 + G * .587000 + B * .114000; int U = R * -.168736 + G * -0.331264 + B * .500000 + 128; int V = R * .500000 + G * -.418688 + B * -.081312 + 128; atomicAdd(&(histogram[Y]),1); image[index] = Y; image[index + 1] = U; image[index + 2] = V; } __syncthreads(); } __global__ void equalize(int *equalized, int *cumulative_dist, int *histogram, int width, int height){ int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int k = idx; k < 256; k += blockDim.x * gridDim.x){ equalized[k] = (int)(((float)cumulative_dist[k] - histogram[0])/((float)width * height - 1) * 255); } } __global__ void YUV2RGB(unsigned char *image, int *cumulative_dist,int *histogram, int *equalized, int width, int height){ int idx = blockIdx.x * blockDim.x + threadIdx.x; long index; for(int i = idx; i < width * height; i += blockDim.x * gridDim.x){ index = i * 3; int Y = equalized[image[index]]; int U = image[index + 1]; int V = image[index + 2]; unsigned char R = (unsigned char)max(0, min(255,(int)(Y + 1.4075 * (V - 128)))); unsigned char G = (unsigned char)max(0, min(255,(int)(Y - 1.3455 * (U - 128) - (.7169 * (V - 128))))); unsigned char B = (unsigned char)max(0, min(255,(int)(Y + 1.7790 * (U - 128)))); image[index] = R; image[index + 1] = G; image[index + 2] = B; } } int main(){ string folder_path = "/home/lombardiminervini/cuda-workspace/histogram_equalization_CUDA/src/images/"; string image_path = "tree.jpg"; Mat image = imread(folder_path + image_path); //load the image Size size (100, 100); resize(image, image, size); if(!image.data){ cout << "no image found"; return -1; } struct timeval start, end; gettimeofday(&start, NULL); int width = image.cols; int height = image.rows; int host_equalized[256]; //cpu equalized histogram int host_cumulative_dist[256]; unsigned char *host_image = image.ptr(); //Mat image to array image int host_histogram[256] = {0}; //cpu histogram unsigned char *device_image; //gpu image int *device_histogram; //gpu histogram int *device_equalized; //gpu equalized histogram int *device_cumulative_dist; //gpu cumulative dist. 
hipMalloc((void **)&device_image, sizeof(char) * (width * height * 3)); //gpu space allocation hipMalloc((void **)&device_histogram, sizeof(int) * 256); // hipMalloc((void **)&device_equalized, sizeof(int) * 256); // hipMalloc((void **)&device_cumulative_dist, sizeof(int) * 256); // hipMemcpy(device_image, host_image, sizeof(char) * (width * height * 3), hipMemcpyHostToDevice); //copy to gpu hipMemcpy(device_histogram, host_histogram, sizeof(int) * 256, hipMemcpyHostToDevice); // int block_size = 256; int grid_size = (width * height + (block_size - 1))/block_size; hipLaunchKernelGGL(( make_histogram), dim3(grid_size), dim3(block_size), 0, 0, device_image, width, height, device_histogram); //call first kernel hipMemcpy(host_histogram, device_histogram, sizeof(int) * 256, hipMemcpyDeviceToHost); host_cumulative_dist[0] = host_histogram[0]; //compute cumulative dist. in cpu // for(int i = 1; i < 256; i++){ // host_cumulative_dist[i] = host_histogram[i] + host_cumulative_dist[i-1]; // } // hipMemcpy(device_cumulative_dist, host_cumulative_dist, sizeof(int) * 256, hipMemcpyHostToDevice); hipMemcpy(device_equalized, host_equalized, sizeof(int) * 256, hipMemcpyHostToDevice); hipLaunchKernelGGL(( equalize), dim3(grid_size), dim3(block_size), 0, 0, device_equalized, device_cumulative_dist, device_histogram, width, height); //call second kernel hipLaunchKernelGGL(( YUV2RGB), dim3(grid_size), dim3(block_size), 0, 0, device_image, device_cumulative_dist, device_histogram, device_equalized, width, height); //call third kernel hipMemcpy(host_image, device_image, sizeof(char) * (width * height * 3), hipMemcpyDeviceToHost); hipFree(device_image); //free gpu hipFree(device_histogram); // hipFree(device_equalized); // hipFree(device_cumulative_dist); // gettimeofday(&end, NULL); double elapsed = ((end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/1000)/1.e3; cout << elapsed; cout << "correctly freed memory \n"; Mat final_image = Mat(Size(width,height), CV_8UC3, host_image); string save_folder_path = "cuda-workspace/histogram_equalization_CUDA/src/saved/"; string save_image_path = "desk.jpg"; imwrite(save_folder_path + save_image_path, final_image); //save equalized RGB image cout << "correctly saved image"; return 0; }
0f5f556715c346e4711c4fb60ac1257ea4df8c80.cu
/* ============================================================================ Name : histogram_equalization_CUDA.cu Author : francesco Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <stdlib.h> #include <string> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <sys/time.h> #include <time.h> using namespace std; using namespace cv; __global__ void make_histogram(unsigned char *image, int width, int height, int *histogram){ int idx = blockIdx.x * blockDim.x + threadIdx.x; long index; for(int i = idx; i < width * height; i += blockDim.x * gridDim.x){ index = i * 3; int R = image[index]; int G = image[index + 1]; int B = image[index + 2]; int Y = R * .299000 + G * .587000 + B * .114000; int U = R * -.168736 + G * -0.331264 + B * .500000 + 128; int V = R * .500000 + G * -.418688 + B * -.081312 + 128; atomicAdd(&(histogram[Y]),1); image[index] = Y; image[index + 1] = U; image[index + 2] = V; } __syncthreads(); } __global__ void equalize(int *equalized, int *cumulative_dist, int *histogram, int width, int height){ int idx = blockIdx.x * blockDim.x + threadIdx.x; for(int k = idx; k < 256; k += blockDim.x * gridDim.x){ equalized[k] = (int)(((float)cumulative_dist[k] - histogram[0])/((float)width * height - 1) * 255); } } __global__ void YUV2RGB(unsigned char *image, int *cumulative_dist,int *histogram, int *equalized, int width, int height){ int idx = blockIdx.x * blockDim.x + threadIdx.x; long index; for(int i = idx; i < width * height; i += blockDim.x * gridDim.x){ index = i * 3; int Y = equalized[image[index]]; int U = image[index + 1]; int V = image[index + 2]; unsigned char R = (unsigned char)max(0, min(255,(int)(Y + 1.4075 * (V - 128)))); unsigned char G = (unsigned char)max(0, min(255,(int)(Y - 1.3455 * (U - 128) - (.7169 * (V - 128))))); unsigned char B = (unsigned char)max(0, min(255,(int)(Y + 1.7790 * (U - 128)))); image[index] = R; image[index + 1] = G; image[index + 2] = B; } } int main(){ string folder_path = "/home/lombardiminervini/cuda-workspace/histogram_equalization_CUDA/src/images/"; string image_path = "tree.jpg"; Mat image = imread(folder_path + image_path); //load the image Size size (100, 100); resize(image, image, size); if(!image.data){ cout << "no image found"; return -1; } struct timeval start, end; gettimeofday(&start, NULL); int width = image.cols; int height = image.rows; int host_equalized[256]; //cpu equalized histogram int host_cumulative_dist[256]; unsigned char *host_image = image.ptr(); //Mat image to array image int host_histogram[256] = {0}; //cpu histogram unsigned char *device_image; //gpu image int *device_histogram; //gpu histogram int *device_equalized; //gpu equalized histogram int *device_cumulative_dist; //gpu cumulative dist. 
cudaMalloc((void **)&device_image, sizeof(char) * (width * height * 3)); //gpu space allocation cudaMalloc((void **)&device_histogram, sizeof(int) * 256); // cudaMalloc((void **)&device_equalized, sizeof(int) * 256); // cudaMalloc((void **)&device_cumulative_dist, sizeof(int) * 256); // cudaMemcpy(device_image, host_image, sizeof(char) * (width * height * 3), cudaMemcpyHostToDevice); //copy to gpu cudaMemcpy(device_histogram, host_histogram, sizeof(int) * 256, cudaMemcpyHostToDevice); // int block_size = 256; int grid_size = (width * height + (block_size - 1))/block_size; make_histogram<<<grid_size, block_size>>> (device_image, width, height, device_histogram); //call first kernel cudaMemcpy(host_histogram, device_histogram, sizeof(int) * 256, cudaMemcpyDeviceToHost); host_cumulative_dist[0] = host_histogram[0]; //compute cumulative dist. in cpu // for(int i = 1; i < 256; i++){ // host_cumulative_dist[i] = host_histogram[i] + host_cumulative_dist[i-1]; // } // cudaMemcpy(device_cumulative_dist, host_cumulative_dist, sizeof(int) * 256, cudaMemcpyHostToDevice); cudaMemcpy(device_equalized, host_equalized, sizeof(int) * 256, cudaMemcpyHostToDevice); equalize<<<grid_size, block_size>>>(device_equalized, device_cumulative_dist, device_histogram, width, height); //call second kernel YUV2RGB<<<grid_size, block_size>>>(device_image, device_cumulative_dist, device_histogram, device_equalized, width, height); //call third kernel cudaMemcpy(host_image, device_image, sizeof(char) * (width * height * 3), cudaMemcpyDeviceToHost); cudaFree(device_image); //free gpu cudaFree(device_histogram); // cudaFree(device_equalized); // cudaFree(device_cumulative_dist); // gettimeofday(&end, NULL); double elapsed = ((end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/1000)/1.e3; cout << elapsed; cout << "correctly freed memory \n"; Mat final_image = Mat(Size(width,height), CV_8UC3, host_image); string save_folder_path = "cuda-workspace/histogram_equalization_CUDA/src/saved/"; string save_image_path = "desk.jpg"; imwrite(save_folder_path + save_image_path, final_image); //save equalized RGB image cout << "correctly saved image"; return 0; }
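The histogram-equalization pair above builds the luma histogram on the GPU, but the host-side cumulative-distribution loop appears to be commented out, so only element 0 of the host CDF is assigned before data is copied to the device. The sketch below shows the intended host-side CDF plus the same mapping the equalize kernel applies; the image dimensions and histogram contents are placeholders, not values from the original program.

// Sketch of the host-side CDF and the equalization mapping matching the kernels above.
#include <cstdio>

int main() {
  const int width = 100, height = 100;            // placeholder image size
  int histogram[256] = {0};
  histogram[0] = width * height;                  // dummy histogram so the example runs

  int cdf[256], equalized[256];
  cdf[0] = histogram[0];
  for (int i = 1; i < 256; ++i)
    cdf[i] = histogram[i] + cdf[i - 1];           // running sum of the luma histogram

  for (int k = 0; k < 256; ++k)                   // same formula as the equalize kernel
    equalized[k] = (int)(((float)cdf[k] - histogram[0]) /
                         ((float)width * height - 1) * 255);

  printf("equalized[0] = %d, equalized[255] = %d\n", equalized[0], equalized[255]);
  return 0;
}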
4e4c033dcde35f32c348f21d1eca4ee5f607adbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "CascadedCompressionGPU.h" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "CascadedDecompressionKernels.cuh" #include "Check.h" #include "CudaUtils.h" #include "cascaded.h" #include "nvcomp.h" #include "nvcomp.hpp" #include "type_macros.h" #include "unpack.h" #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Weffc++" #pragma GCC diagnostic ignored "-Wunused-parameter" #endif #include <hipcub/hipcub.hpp> #ifdef __GNUC__ #pragma GCC diagnostic pop #endif #ifdef USE_RMM #include <rmm/rmm.h> #endif #include <cassert> #include <iostream> #include <list> #include <map> #include <mutex> #include <sstream> #include <vector> // align all temp allocations by 512B #define CUDA_MEM_ALIGN(size) (((size) + 0x1FF) & ~0x1FF) #ifndef RLE_THREAD_BLOCK #define RLE_THREAD_BLOCK 128 #endif #ifndef RLE_ELEMS_PER_THREAD #define RLE_ELEMS_PER_THREAD 4 #endif #define RLE_ELEMS_PER_BLOCK (RLE_THREAD_BLOCK * RLE_ELEMS_PER_THREAD) namespace nvcomp { // internal representations: one kernel per scheme enum nvcompScheme_t { NVCOMP_SCHEME_BP, NVCOMP_SCHEME_RLE, NVCOMP_SCHEME_DELTA, NVCOMP_SCHEME_RLE_DELTA, // automatically fused RLE+Delta to reduce mem // traffic }; struct nvcompLayer_t; struct nvcompDataNode_t { void* ptr; nvcompType_t type; int packing; nvcompLayer_t* parentLayer; size_t length; // to enable BP as a separate layer, default -1 int pointToId; }; struct nvcompLayer_t { nvcompScheme_t scheme; size_t maxOutputSize; nvcompDataNode_t* vals; nvcompDataNode_t* runs; nvcompDataNode_t* output; // TODO: can we get rid of those int valId; int runId; int outputId; }; struct nvcompIntConfig_t { int outputId = 0; nvcompType_t outputType = NVCOMP_TYPE_INT; size_t maxOutputSize = 0; std::list<nvcompLayer_t> layers = {}; std::map<int, nvcompDataNode_t> nodes = {}; // TODO: should we make this nvcompData_t instead of int? 
// compute the workspace size size_t getWorkspaceBytes(); size_t getWorkspaceBytes(nvcompDataNode_t* node); // fuse kernels, etc. void optimizeLayers(); }; struct nvcompIntTask_t { // TODO: add CUDA event assigned to this task }; struct nvcompIntHandle_t { nvcompIntConfig_t* config = nullptr; hipStream_t stream = 0; // main decomp functions template <typename outputT> nvcompError_t decompCPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers); template <typename outputT, typename runT> nvcompError_t decompGPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers, hipStream_t stream); // workspace memory size_t workspaceBytes = 0; void* workspaceStorage = nullptr; // workspace mem management nvcompError_t release(); nvcompError_t allocateAsync(); // new function that splits of pre-allocated memory // workspace breakdown size_t max_input_len = 0; // maximum input RLE length size_t max_output_len = 0; // maximum output RLE length void* temp_val = nullptr; // temp RLE val expansions void* temp_run = nullptr; // temp RLE run expansions void* temp_delta = nullptr; // temp Delta expansions void* temp_output = nullptr; // temp Delta expansions // cub scan memory size_t temp_scan_bytes = 0; void* temp_scan = nullptr; // block indices start and offsets size_t max_num_blocks = 0; size_t* start_ind = nullptr; size_t* start_off = nullptr; }; template <typename keyT, typename valueT> struct SharedMap { std::map<keyT, valueT> data = {}; std::mutex m = {}; // find the next available id keyT find_next() { std::lock_guard<std::mutex> guard(m); int id = 0; while (data.find(id) != data.end()) id++; return (keyT)id; } bool exists(const keyT& key) { std::lock_guard<std::mutex> guard(m); return data.find(key) != data.end(); } void insert(const keyT& key, const valueT& val) { std::lock_guard<std::mutex> guard(m); if (data.find(key) == data.end()) data[key] = val; } valueT& operator[](const keyT& key) { std::lock_guard<std::mutex> guard(m); return data[key]; } void erase(const keyT& key) { std::lock_guard<std::mutex> guard(m); data.erase(key); } }; // internal collections SharedMap<nvcompConfig_t, nvcompIntConfig_t> configs; SharedMap<nvcompHandle_t, nvcompIntHandle_t> handles; // TODO: can we get rid of these? 
std::mutex config_mutex; std::mutex handle_mutex; namespace { template <typename T> void cubDeviceScanTempSpace(size_t& temp_scan_bytes, const size_t max_input_len) { void* temp_scan = nullptr; T* temp_run = nullptr; hipcub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, temp_run, temp_run, max_input_len); } void checkCompressSize(const size_t numBytes) { const size_t maxBytes = static_cast<size_t>(std::numeric_limits<int>::max()); if (numBytes > maxBytes) { throw std::runtime_error( "Cascaded compression can only compress up to a maximum of " + std::to_string(maxBytes) + " bytes at a time (requested " + std::to_string(numBytes) + " bytes)."); } } nvcompIntConfig_t generateConfig(const CascadedMetadata* const metadata) { const int numRLEs = metadata->getNumRLEs(); const int numDeltas = metadata->getNumDeltas(); const bool bitPacking = metadata->useBitPacking(); int vals_id = 0; // initialize config const nvcompType_t type = metadata->getValueType(); nvcompIntConfig_t config; config.outputId = vals_id; config.outputType = type; config.maxOutputSize = metadata->getUncompressedSize(); const nvcompType_t runType = selectRunsType(metadata->getNumUncompressedElements()); const size_t maxSegmentSize = metadata->getUncompressedSize(); config.nodes[0].length = metadata->getNumUncompressedElements(); // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = ::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // add to config nvcompConfigAddRLE_BP( &config, inputId, maxSegmentSize, valId, type, bitPacking, runId, runType, bitPacking); config.nodes[valId].length = metadata->getNumElementsOf(valId); config.nodes[runId].length = metadata->getNumElementsOf(runId); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( &config, valId, maxSegmentSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( &config, valId, maxSegmentSize, deltaId, type, 0); // no bitpacking when delta is used as an intermediate step } config.nodes[deltaId].length = metadata->getNumElementsOf(deltaId); } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( &config, inputId, maxSegmentSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( &config, inputId, maxSegmentSize, deltaId, type, 0); // no bitpacking when delta is used as an intermediate step } config.nodes[deltaId].length = metadata->getNumElementsOf(deltaId); } } // If there are no RLEs or Deltas, we will do a single BP step. 
if (numRLEs == 0 && numDeltas == 0) { const int inputId = vals_id; const int bpId = ++vals_id; nvcompConfigAddBP(&config, inputId, maxSegmentSize, bpId, type); config.nodes[bpId].length = metadata->getNumElementsOf(bpId); } return config; } template <typename T> constexpr bool isFixedWidth() { return std::is_same<T, char>::value || std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int16_t>::value || std::is_same<T, uint16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, uint64_t>::value; } template <typename T> size_t writeFixedWidthData( const T* const val, void* const ptr, const size_t offset, const size_t maxSize) { assert(isFixedWidth<T>()); size_t newOffset = offset + sizeof(*val); if (ptr) { // only write if we're doing a really output if (newOffset > maxSize) { throw std::runtime_error( "Not enough room to write member, need at least " + std::to_string(newOffset) + " bytes, but given only " + std::to_string(maxSize)); } memcpy(static_cast<char*>(ptr) + offset, val, sizeof(*val)); } return newOffset; } template <typename T> size_t writeData( const T* const val, void* const ptr, const size_t offset, const size_t maxSize) { if (isFixedWidth<T>()) { return writeFixedWidthData(val, ptr, offset, maxSize); } else if (std::is_same<T, bool>::value) { const int8_t typedVal = static_cast<int8_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else if (std::is_same<T, int>::value) { // on most systems this will not be used, as int32_t is usually defined as // int const int32_t typedVal = static_cast<int32_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else if (std::is_same<T, size_t>::value) { const uint64_t typedVal = static_cast<uint64_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else { throw std::runtime_error("Unsupported type for serialization."); } } } // namespace /************************************************************************************** * Older API definitions below. New API calls rely on them. 
**************************************************************************************/ nvcompIntConfig_t* createConfig(const CascadedMetadata* const metadata) { nvcompIntConfig_t* config = new nvcompIntConfig_t(); *config = generateConfig(metadata); return config; } void destroyConfig(nvcompIntConfig_t* config) { delete config; } nvcompError_t nvcompConfigAddRLE_BP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType, int valPacking, int runId, nvcompType_t runType, int runPacking) { nvcompIntConfig_t& c = *config; // setup input nodes if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, valPacking, NULL, 0, 0}; } if (c.nodes.find(runId) == c.nodes.end()) { c.nodes[runId] = {NULL, runType, runPacking, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = {NVCOMP_SCHEME_RLE, maxOutputSize, NULL, NULL, NULL, valId, runId, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } nvcompError_t nvcompConfigAddDelta_BP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType, int valPacking) { nvcompIntConfig_t& c = *config; // setup the input node if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, valPacking, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = {NVCOMP_SCHEME_DELTA, maxOutputSize, NULL, NULL, NULL, valId, -1, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } nvcompError_t nvcompConfigAddBP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType) { nvcompIntConfig_t& c = *config; // setup the input node if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, 1, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = { NVCOMP_SCHEME_BP, maxOutputSize, NULL, NULL, NULL, valId, -1, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } size_t nvcompIntConfig_t::getWorkspaceBytes(nvcompDataNode_t* /*node*/) { // TODO: allocate output buffers for each node except the terminal one // currently this is done inside decompGPU which will break concurrency (once // we add streams) return 0; } size_t nvcompIntConfig_t::getWorkspaceBytes() { if (nodes.find(outputId) == nodes.end()) { throw std::runtime_error( "getWorkspaceBytes(): could not find output ID amongst nodes: " + std::to_string(outputId) + " with " + std::to_string(nodes.size()) + " nodes."); } if (nodes[outputId].parentLayer == NULL) { throw std::runtime_error("getWorkspaceBytes(): the output node is not used " "by any compression layers."); } int numRLEs = 0; int numDeltas = 0; size_t max_input_len = 0; for (const nvcompLayer_t& layer : layers) { if (layer.scheme == NVCOMP_SCHEME_RLE || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numRLEs; } if (layer.scheme == NVCOMP_SCHEME_DELTA || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numDeltas; } const size_t layer_len = nodes[layer.valId].length; if (layer_len > max_input_len) { max_input_len = layer_len; 
} } const size_t max_output_len = maxOutputSize; size_t size = 0; // temp vals, runs, delta, output if (numRLEs > 0 || numDeltas > 0) { size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); if (numRLEs > 0) { size += CUDA_MEM_ALIGN( max_input_len * sizeOfnvcompType(selectRunsType(maxOutputSize))); } size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); } size_t temp_scan_bytes_run = 0; size_t temp_scan_bytes_delta = 0; NVCOMP_TYPE_ONE_SWITCH( selectRunsType(max_output_len), cubDeviceScanTempSpace, temp_scan_bytes_run, max_input_len); NVCOMP_TYPE_ONE_SWITCH( outputType, cubDeviceScanTempSpace, temp_scan_bytes_delta, max_input_len); size_t temp_scan_bytes = ::max(temp_scan_bytes_run, temp_scan_bytes_delta); size += CUDA_MEM_ALIGN(temp_scan_bytes); size_t max_num_blocks = (max_output_len + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; size += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); size += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); return size; } nvcompError_t nvcompIntHandle_t::release() { return nvcompSuccess; } // recursively assign memory for all nodes in our DAG // ** Assumes worspaceStorage is already allocated with sufficient space ** nvcompError_t nvcompIntHandle_t::allocateAsync() { nvcompIntConfig_t& c = *config; nvcompType_t outputType = c.outputType; // assign member variables for size max_output_len = c.maxOutputSize; max_input_len = 0; int numRLEs = 0; int numDeltas = 0; for (const nvcompLayer_t& layer : c.layers) { if (layer.scheme == NVCOMP_SCHEME_RLE || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numRLEs; } if (layer.scheme == NVCOMP_SCHEME_DELTA || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numDeltas; } const size_t layer_len = c.nodes[layer.valId].length; if (layer_len > max_input_len) { max_input_len = layer_len; } } unsigned char* ptr = (unsigned char*)workspaceStorage; // temporary buffers that can hold RLE expansions and other data, but we will // re-use locations if (numRLEs > 0 || numDeltas > 0) { temp_val = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); if (numRLEs > 0) { temp_run = ptr; ptr += CUDA_MEM_ALIGN( max_input_len * sizeOfnvcompType(selectRunsType(max_output_len))); } temp_delta = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); // one additional buffer for delta expansion // TODO: can we get rid of this one? 
temp_output = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); } // allocate temp storage for cub scan using the largest size_t // this temp storage will be reused by delta and runs scans of different types temp_scan = ptr; size_t temp_scan_bytes_run = 0; size_t temp_scan_bytes_delta = 0; NVCOMP_TYPE_ONE_SWITCH( selectRunsType(max_output_len), cubDeviceScanTempSpace, temp_scan_bytes_run, max_input_len); NVCOMP_TYPE_ONE_SWITCH( outputType, cubDeviceScanTempSpace, temp_scan_bytes_delta, max_input_len); temp_scan_bytes = ::max(temp_scan_bytes_run, temp_scan_bytes_delta); ptr += CUDA_MEM_ALIGN(temp_scan_bytes); // block indices/offsets max_num_blocks = (max_output_len + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; start_ind = (size_t*)ptr; ptr += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); start_off = (size_t*)ptr; ptr += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); return nvcompSuccess; } // here we do kernel fusion void nvcompIntConfig_t::optimizeLayers() { for (auto it = layers.begin(); it != layers.end();) { if (it->scheme == NVCOMP_SCHEME_DELTA) { int valId = it->valId; int outputId = it->outputId; if (nodes.find(valId) != nodes.end() && nodes[valId].parentLayer != NULL && nodes[valId].parentLayer->scheme == NVCOMP_SCHEME_RLE) { nodes[outputId].parentLayer = nodes[valId].parentLayer; nodes[outputId].parentLayer->scheme = NVCOMP_SCHEME_RLE_DELTA; nodes[outputId].parentLayer->outputId = outputId; it = layers.erase(it); continue; } } it++; } } /* These functions may not be needed and removed to simplify codebase */ nvcompError_t nvcompSetWorkspace( nvcompHandle_t /*handle*/, void* /*workspaceStorage*/, size_t /*workspaceBytes*/) { std::cerr << "ERROR: nvcompSetWorkspace is not implemented yet!" << std::endl; return nvcompErrorNotSupported; } nvcompError_t nvcompGetWorkspaceSize(nvcompHandle_t handle, size_t* workspaceBytes) { *workspaceBytes = handles[handle].workspaceBytes; return nvcompSuccess; } nvcompError_t nvcompSetStream(nvcompHandle_t handle, hipStream_t streamId) { handles[handle].stream = streamId; return nvcompSuccess; } nvcompError_t nvcompGetStream(nvcompHandle_t handle, hipStream_t* streamId) { *streamId = handles[handle].stream; return nvcompSuccess; } // if the header is not packed this will shallow copy the pointer // otherwise unpack into the output buffer template <typename inputT, typename outputT> void unpackCpu( outputT** output, nvcompDataNode_t* node, const void* hdr, const void* data) { const CascadedMetadata::Header header = *static_cast<const CascadedMetadata::Header*>(hdr); if (node->packing) { for (size_t i = 0; i < header.length; ++i) { const inputT minValue = *CascadedMetadata::getMinValueLocation<inputT>(&header); (*output)[i] = unpackBytes(data, header.numBits, minValue, i); } } else { if (typeid(inputT) == typeid(outputT)) { *output = (outputT*)data; } else { for (size_t i = 0; i < header.length; i++) (*output)[i] = (outputT)((inputT*)data)[i]; } } } template <typename outputT> void unpackCpu( outputT** output, nvcompDataNode_t* node, const void* hdr, const void* data) { NVCOMP_TYPE_TWO_SWITCH_FIRST_ONLY( node->type, outputT, unpackCpu, output, node, hdr, data); } // if the header is not packed this will shallow copy the pointer if it's // accessible from the GPU otherwise copy or unpack into the output buffer template <typename inputT, typename outputT> void unpackGpu( outputT* d_output, nvcompDataNode_t* node, const void* data, const void* h_hdr, hipStream_t stream) { void* d_input = NULL; // prepare input 
data hipPointerAttribute_t attr; hipError_t err = hipPointerGetAttributes(&attr, data); if (err != hipSuccess) { std::ostringstream oss; oss << data; throw std::runtime_error( "unpackGpu(): Failed to get pointer attributes for " + oss.str() + " due to: " + std::to_string(err)); } if (attr.type != cudaMemoryTypeUnregistered) { // memory is accessible to the GPU d_input = attr.devicePointer; } else { throw std::runtime_error("unpackGpu(): Data not accessible to the GPU"); } // Get length of run from the host-side header size_t length = static_cast<const CascadedMetadata::Header*>(h_hdr)->length; CascadedMetadata::Header header = *static_cast<const CascadedMetadata::Header*>(h_hdr); const unsigned char numBits = header.numBits; const inputT minValue = *CascadedMetadata::getMinValueLocation<inputT>(&header); const dim3 block(512); const dim3 grid(roundUpDiv(length, block.x)); if (node->packing) { hipLaunchKernelGGL(( unpackBytesKernel), dim3(grid), dim3(block), 0, stream, d_input, d_output, numBits, minValue, length); } else { hipLaunchKernelGGL(( convertKernel), dim3(grid), dim3(block), 0, stream, static_cast<const inputT*>(d_input), d_output, length); } } template <typename outputT> void unpackGpu( outputT* d_output, nvcompDataNode_t* node, const void* data, const void* h_hdr, hipStream_t stream) { NVCOMP_TYPE_TWO_SWITCH_FIRST_ONLY( node->type, outputT, unpackGpu, d_output, node, data, h_hdr, stream); } template <typename outputT> nvcompError_t nvcompIntHandle_t::decompCPU( nvcompDataNode_t* node, const void** inputHdrs, const void** inputData) { size_t maxOutputSize = config->maxOutputSize; std::vector<outputT> unpacked_vals; std::vector<size_t> unpacked_runs; outputT* vals_data = NULL; size_t* runs_data = NULL; size_t vals_len; nvcompLayer_t* layer = node->parentLayer; // add BP only layer if (layer->scheme == NVCOMP_SCHEME_BP) { unpacked_vals.resize(maxOutputSize); vals_data = &unpacked_vals[0]; unpackCpu( &vals_data, layer->vals, inputHdrs[layer->valId], inputData[layer->valId]); vals_len = static_cast<const CascadedMetadata::Header*>(inputHdrs[layer->valId]) ->length; node->length = vals_len; // lazy allocation // TODO: move to allocate() if (node->ptr == NULL) node->ptr = new outputT[vals_len]; // copy and convert type if necessary for (int i = 0; i < vals_len; i++) { ((outputT*)(node->ptr))[i] = vals_data[i]; } return nvcompSuccess; } // compute vals if (layer->vals->parentLayer != NULL) { decompCPU<outputT>(layer->vals, inputHdrs, inputData); vals_data = (outputT*)layer->vals->ptr; vals_len = layer->vals->length; } else { unpacked_vals.resize(maxOutputSize); vals_data = &unpacked_vals[0]; unpackCpu( &vals_data, layer->vals, inputHdrs[layer->valId], inputData[layer->valId]); vals_len = static_cast<const CascadedMetadata::Header*>(inputHdrs[layer->valId]) ->length; } // compute runs if (layer->runs != NULL) { if (layer->runs->parentLayer != NULL) { decompCPU<size_t>(layer->runs, inputHdrs, inputData); runs_data = (size_t*)layer->runs->ptr; } else { unpacked_runs.resize(maxOutputSize); runs_data = &unpacked_runs[0]; unpackCpu( &runs_data, layer->runs, inputHdrs[layer->runId], inputData[layer->runId]); } } // decompress (this is using additional memory) std::vector<outputT> next; next.clear(); switch (layer->scheme) { case NVCOMP_SCHEME_RLE: { for (int i = 0; i < vals_len; i++) next.insert(next.end(), runs_data[i], vals_data[i]); break; } case NVCOMP_SCHEME_RLE_DELTA: { for (int i = 0; i < vals_len; i++) next.insert(next.end(), runs_data[i], vals_data[i]); for (int i = 1; i < 
next.size(); i++) next[i] += next[i - 1]; break; } case NVCOMP_SCHEME_DELTA: { next.resize(vals_len); next[0] = vals_data[0]; for (int i = 1; i < vals_len; i++) next[i] = next[i - 1] + vals_data[i]; break; } default: return nvcompErrorNotSupported; } node->length = next.size(); // lazy allocation // TODO: move to allocate() if (node->ptr == NULL) node->ptr = new outputT[next.size()]; // copy and convert type if necessary for (int i = 0; i < next.size(); i++) ((outputT*)(node->ptr))[i] = next[i]; return nvcompSuccess; } // Perform Cascaded decompression on the GPU. // Assumes all workspace is pre-allocated and assigned, inputHdrs and inputData // are GPU-accessible, and h_headers is CPU-accessible template <typename outputT, typename runT> nvcompError_t nvcompIntHandle_t::decompGPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers, hipStream_t stream = NULL) { // prepare device output buffer if necessary // TODO: move to the init step hipPointerAttribute_t attr; outputT* out_ptr = NULL; // get typed copies of pointers to avoid casting outputT* const localOutput = static_cast<outputT*>(temp_output); outputT* const localDelta = static_cast<outputT*>(temp_delta); runT* const localRun = static_cast<runT*>(temp_run); if (node->ptr == nullptr) { throw std::runtime_error( "nvcompIntHandle_t::decompGPU(): Got node with null ptr."); } hipError_t err = hipPointerGetAttributes(&attr, node->ptr); if (err != hipSuccess) { throw std::runtime_error( "nvcompIntHandle_t::decompGPU(): Failed to get cuda pointer " "attributes: " + std::to_string(err)); } if (attr.type != cudaMemoryTypeUnregistered) { // direct access is possible out_ptr = (outputT*)attr.devicePointer; } else { throw std::runtime_error("nvcompIntHandle_t::decompGPU(): Workspace memory " "not accessible to GPU."); } nvcompLayer_t* layer = node->parentLayer; if (layer->scheme == NVCOMP_SCHEME_BP) { // We assume this is the only layer, and we just do it and exit layer->vals->ptr = out_ptr; unpackGpu( (outputT*)layer->vals->ptr, layer->vals, inputData[layer->valId], h_headers[layer->valId], stream); layer->vals->length = static_cast<const CascadedMetadata::Header*>(h_headers[layer->valId]) ->length; assert(layer->vals->length <= max_input_len); return nvcompSuccess; } // prepare inputs std::swap(temp_output, temp_val); if (layer->vals->parentLayer != NULL) { layer->vals->ptr = localOutput; // when recursing, swap decompGPU<outputT, runT>(layer->vals, inputData, h_headers, stream); } else { // unpack RLE values layer->vals->ptr = localOutput; unpackGpu( (outputT*)layer->vals->ptr, layer->vals, inputData[layer->valId], h_headers[layer->valId], stream); layer->vals->length = static_cast<const CascadedMetadata::Header*>(h_headers[layer->valId]) ->length; assert(layer->vals->length <= max_input_len); } if (layer->runs != nullptr) { if (layer->runs->parentLayer != nullptr) { throw std::runtime_error("decompGPU(): Runs cannot have parent layers."); } else { // unpack RLE runs layer->runs->ptr = localRun; unpackGpu( (runT*)layer->runs->ptr, layer->runs, inputData[layer->runId], h_headers[layer->runId], stream); layer->runs->length = static_cast<const CascadedMetadata::Header*>( h_headers[layer->runId]) ->length; } } outputT* d_vals = (outputT*)layer->vals->ptr; const size_t input_size = layer->vals->length; assert(input_size <= max_input_len); if (layer->scheme == NVCOMP_SCHEME_DELTA) { assert(out_ptr != d_vals); hipcub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, d_vals, out_ptr, input_size, stream); } else { // must be 
RLE of some form runT* d_runs = (runT*)layer->runs->ptr; assert(layer->runs->length == input_size); if (layer->scheme == NVCOMP_SCHEME_RLE_DELTA) { const dim3 block(512); const dim3 grid(roundUpDiv(input_size, block.x)); hipLaunchKernelGGL(( vecMultKernel), dim3(grid), dim3(block), 0, stream, d_vals, d_runs, localDelta, input_size); // inclusive scan to compute Delta sums hipcub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, localDelta, localDelta, input_size, stream); } // inclusive scan to compute RLE offsets // TODO: could be merged with the unpack kernel? hipcub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, d_runs, d_runs, input_size, stream); const size_t output_length = node->length; // precompute start/end boundaries for each CUDA block size_t output_grid = (output_length + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; size_t output_grid_block = (output_grid + RLE_THREAD_BLOCK - 1) / RLE_THREAD_BLOCK; hipLaunchKernelGGL(( searchBlockBoundaries<runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD>) , dim3(output_grid_block), dim3(RLE_THREAD_BLOCK), 0, stream, start_ind, start_off, output_grid, input_size, d_runs); // expand RLE and apply Delta: buf[r] -> buf[r+1] // TODO: implement macro to look nicer? switch (layer->scheme) { case NVCOMP_SCHEME_RLE_DELTA: hipLaunchKernelGGL(( expandRLEDelta< outputT, outputT, runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD, true>), dim3(output_grid), dim3(RLE_THREAD_BLOCK), 0, stream, (outputT*)out_ptr, output_length, d_vals, d_runs, localDelta, start_ind, start_off); break; case NVCOMP_SCHEME_RLE: hipLaunchKernelGGL(( expandRLEDelta< outputT, outputT, runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD, false>), dim3(output_grid), dim3(RLE_THREAD_BLOCK), 0, stream, (outputT*)out_ptr, output_length, d_vals, d_runs, localDelta, start_ind, start_off); break; default: throw std::runtime_error( "Invalid rle scheme: " + std::to_string(layer->scheme)); } } return nvcompSuccess; } nvcompError_t nvcompSetNodeLength(nvcompHandle_t handle, int nodeId, size_t output_length) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; c.nodes[nodeId].length = output_length; return nvcompSuccess; } // Main function that sets up Cascaded decompression from the old API. // the new cascaded decompression API call is just a wrapper around this (though // heavily modified to be asynchronous). template <typename outputType, typename runType> nvcompError_t nvcompDecompressLaunch( nvcompHandle_t handle, void* outputData, const size_t outputSize, const void** inputData, const void** h_headers) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; // TODO: assign all the buffers nvcompDataNode_t* terminal_node = &c.nodes[c.outputId]; terminal_node->ptr = outputData; nvcompError_t ret = h.decompGPU<outputType, runType>( terminal_node, inputData, h_headers, h.stream); const size_t neededBytes = terminal_node->length * sizeof(outputType); if (outputSize < neededBytes) { std::cerr << "Insufficient space to write decompressed date: given " << outputSize << " bytes but need " << neededBytes << " bytes." 
<< std::endl; return nvcompErrorInvalidValue; } // this is to enable the correct result for multi-chunk execuation for (auto it = c.nodes.begin(); it != c.nodes.end(); it++) { it->second.length = 0; } return ret; } nvcompError_t nvcompDecompressLaunch( nvcompHandle_t handle, const size_t numUncompressedElements, void* const outputData, const size_t outputSize, const void** const inputData, const void** const h_headers) { const nvcompType_t outputType = handles[handle].config->outputType; const nvcompType_t runType = selectRunsType(numUncompressedElements); NVCOMP_TYPE_TWO_SWITCH_RETURN( outputType, runType, nvcompDecompressLaunch, handle, outputData, outputSize, inputData, h_headers); } nvcompError_t nvcompDestroyHandle(nvcompHandle_t handle) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; // free temp memory h.release(); // clear all local nodes attached to this config c.nodes.clear(); // remove the handle from the list handles.erase(handle); return nvcompSuccess; } // Modified version of handle creation function from previous API to now be // asynchronous Assumes workspaceStorage is already allocated. nvcompError_t nvcompCreateHandleAsync( nvcompHandle_t* handle, nvcompIntConfig_t* const config, void* workspaceStorage, const size_t workspaceBytes, hipStream_t stream) { std::lock_guard<std::mutex> guard(handle_mutex); nvcompIntConfig_t& c = *config; // first - optimize the plan c.optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto it = c.layers.begin(); it != c.layers.end(); it++) { it->vals = &c.nodes[it->valId]; it->output = &c.nodes[it->outputId]; if (it->runId >= 0) it->runs = &c.nodes[it->runId]; } if (workspaceBytes < c.getWorkspaceBytes()) { std::cerr << "Insufficient workspace size: got " << workspaceBytes << " but need " << c.getWorkspaceBytes() << std::endl; return nvcompErrorInvalidValue; } // find the next available id nvcompHandle_t id = handles.find_next(); *handle = id; nvcompIntHandle_t& h = handles[id]; h.config = config; h.stream = stream; h.workspaceBytes = workspaceBytes; h.workspaceStorage = workspaceStorage; h.allocateAsync(); return nvcompSuccess; } } // namespace nvcomp using namespace nvcomp; nvcompError_t nvcompCascadedDecompressGetMetadata( const void* in_ptr, const size_t in_bytes, void** metadata_ptr, hipStream_t stream) { try { CHECK_NOT_NULL(in_ptr); CHECK_NOT_NULL(metadata_ptr); CascadedMetadataOnGPU gpuMetadata((void*)in_ptr, in_bytes); *metadata_ptr = new CascadedMetadata(gpuMetadata.copyToHost()); CudaUtils::sync(stream); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedDecompressGetMetadata()"); } return nvcompSuccess; } void nvcompCascadedDecompressDestroyMetadata(void* const metadata_ptr) { CascadedMetadata* metadata = static_cast<CascadedMetadata*>(metadata_ptr); ::operator delete(metadata); } // TODO: improve estimate with a more sophistocated approach. 
nvcompError_t nvcompCascadedDecompressGetTempSize( const void* metadata_ptr, size_t* temp_bytes) { try { CHECK_NOT_NULL(metadata_ptr); CHECK_NOT_NULL(temp_bytes); CascadedMetadata* metadata = (CascadedMetadata*)metadata_ptr; nvcompIntConfig_t c = generateConfig(metadata); // first - optimize the plan c.optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto it = c.layers.begin(); it != c.layers.end(); it++) { it->vals = &c.nodes[it->valId]; it->output = &c.nodes[it->outputId]; if (it->runId >= 0) { it->runs = &c.nodes[it->runId]; } } // Return the required temp storage size *temp_bytes = c.getWorkspaceBytes(); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedDecompressGetTempSize()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedDecompressGetOutputSize( const void* metadata_ptr, size_t* output_bytes) { try { *output_bytes = static_cast<const CascadedMetadata*>(metadata_ptr) ->getUncompressedSize(); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedDecompressionGetOutputSize()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedDecompressAsync( const void* const in_ptr, const size_t in_bytes, void* const temp_ptr, const size_t temp_bytes, const void* const metadata_ptr, void* const out_ptr, const size_t out_bytes, hipStream_t stream) { nvcompHandle_t handle = -1; try { CHECK_NOT_NULL(metadata_ptr); const CascadedMetadata* const metadata = static_cast<const CascadedMetadata*>(metadata_ptr); if (in_bytes < metadata->getCompressedSize()) { throw NVCompException( nvcompErrorInvalidValue, "in_bytes is smaller than compressed data size: " + std::to_string(in_bytes) + " < " + std::to_string(metadata->getCompressedSize())); } nvcompIntConfig_t c = generateConfig(metadata); // first - optimize the plan c.optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto it = c.layers.begin(); it != c.layers.end(); it++) { it->vals = &c.nodes[it->valId]; it->output = &c.nodes[it->outputId]; if (it->runId >= 0) { it->runs = &c.nodes[it->runId]; } } CHECK_API_CALL( nvcompCreateHandleAsync(&handle, &c, temp_ptr, temp_bytes, stream)); assert(handle >= 0); // Pointers to different portions of compressed data std::vector<void*> inputData(metadata->getNumInputs(), nullptr); std::vector<CascadedMetadata::Header> inputHdrs; std::vector<CascadedMetadata::Header*> cpuHdrs; for (size_t i = 0; i < metadata->getNumInputs(); i++) { inputHdrs.emplace_back(metadata->getHeader(i)); inputData[i] = &((char*)in_ptr)[metadata->getDataOffset(i)]; } for (CascadedMetadata::Header& hdr : inputHdrs) { cpuHdrs.emplace_back(&hdr); } nvcompDecompressLaunch( handle, metadata->getNumUncompressedElements(), out_ptr, out_bytes, (const void**)inputData.data(), (const void**)cpuHdrs.data()); nvcompDestroyHandle(handle); } catch (const std::exception& e) { if (handle >= 0) { nvcompDestroyHandle(handle); } return Check::exception_to_error(e, "nvcompCascadedDecompressAsync()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedCompressGetTempSize( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const format_opts, size_t* const temp_bytes) { try { checkCompressSize(in_bytes); nvcompCascadedCompressionGPU::computeWorkspaceSize( in_ptr, in_bytes, in_type, format_opts, temp_bytes); } catch (const std::exception& e) { return Check::exception_to_error(e, "nvcompCascadedCompressGetTempSize()"); } return nvcompSuccess; } nvcompError_t 
nvcompCascadedCompressGetOutputSize( const void* in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* format_opts, void* const temp_ptr, const size_t temp_bytes, size_t* const out_bytes, const int exact_out_bytes) { try { checkCompressSize(in_bytes); if (exact_out_bytes) { throw std::runtime_error("Exact output bytes is unimplemented at " "this time."); } nvcompCascadedCompressionGPU::generateOutputUpperBound( in_ptr, in_bytes, in_type, format_opts, temp_ptr, temp_bytes, out_bytes); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedCompressGetOutputSize()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedCompressAsync( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const format_opts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, hipStream_t stream) { try { checkCompressSize(in_bytes); CHECK_NOT_NULL(out_bytes); if (*out_bytes == 0) { throw NVCompException( nvcompErrorInvalidValue, "Output size cannot be zero. Make sure " "to set the size of out_bytes to size of output space allocated " "for compressed output."); } nvcompCascadedCompressionGPU::compressAsync( in_ptr, in_bytes, in_type, format_opts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } catch (const std::exception& e) { return Check::exception_to_error(e, "nvcompCascadedCompressAsync()"); } return nvcompSuccess; } /***************************************************************************** * Definitions of API calls for automatically selected compression ****************************************************************************/ nvcompError_t nvcompCascadedCompressAutoGetTempSize( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, size_t* const temp_bytes) { // Assume the scheme that requires the most temp space nvcompCascadedFormatOpts biggest_opts; biggest_opts.num_RLEs = 2; biggest_opts.num_deltas = 2; biggest_opts.use_bp = 1; return API_WRAPPER(nvcompCascadedCompressGetTempSize( in_ptr, in_bytes, in_type, &biggest_opts, temp_bytes), "nvcompCascadedCompressAutoGetTempSize()"); } nvcompError_t nvcompCascadedCompressAutoGetOutputSize( const void* in_ptr, size_t in_bytes, nvcompType_t in_type, void* temp_ptr, size_t temp_bytes, size_t* out_bytes) { // Assume the scheme that can result in the largest output nvcompCascadedFormatOpts biggest_opts; biggest_opts.num_RLEs = 2; biggest_opts.num_deltas = 2; biggest_opts.use_bp = 1; return API_WRAPPER(nvcompCascadedCompressGetOutputSize( in_ptr, in_bytes, in_type, &biggest_opts, temp_ptr, temp_bytes, out_bytes, 0), "nvcompCascadedCompressAutoGetOutputSize()"); } nvcompError_t nvcompCascadedCompressAuto( const void* in_ptr, size_t in_bytes, nvcompType_t in_type, void* temp_ptr, size_t temp_bytes, void* out_ptr, size_t* out_bytes, hipStream_t stream) { try { nvcompCascadedSelectorOpts selector_opts; selector_opts.sample_size = 1024; selector_opts.num_samples = 100; size_t type_bytes = sizeOfnvcompType(in_type); // Adjust sample size if input is too small if(in_bytes < (selector_opts.sample_size * selector_opts.num_samples * type_bytes)) { selector_opts.sample_size = in_bytes / (10*type_bytes); selector_opts.num_samples = 10; } nvcompCascadedFormatOpts format_opts; double est_ratio; // Run selector to get format opts for compression CHECK_API_CALL(nvcompCascadedSelectorSelectConfig( in_ptr, in_bytes, in_type, selector_opts, temp_ptr, temp_bytes, &format_opts, &est_ratio, 
      stream));

  CudaUtils::sync(stream);

  // Run compression
  CHECK_API_CALL(nvcompCascadedCompressAsync(
      in_ptr,
      in_bytes,
      in_type,
      &format_opts,
      temp_ptr,
      temp_bytes,
      out_ptr,
      out_bytes,
      stream));

  } catch (const std::exception& e) {
    return Check::exception_to_error(e, "nvcompCascadedCompressAuto()");
  }

  return nvcompSuccess;
}
4e4c033dcde35f32c348f21d1eca4ee5f607adbf.cu
/* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "CascadedCompressionGPU.h" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "CascadedDecompressionKernels.cuh" #include "Check.h" #include "CudaUtils.h" #include "cascaded.h" #include "nvcomp.h" #include "nvcomp.hpp" #include "type_macros.h" #include "unpack.h" #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Weffc++" #pragma GCC diagnostic ignored "-Wunused-parameter" #endif #include <cub/cub.cuh> #ifdef __GNUC__ #pragma GCC diagnostic pop #endif #ifdef USE_RMM #include <rmm/rmm.h> #endif #include <cassert> #include <iostream> #include <list> #include <map> #include <mutex> #include <sstream> #include <vector> // align all temp allocations by 512B #define CUDA_MEM_ALIGN(size) (((size) + 0x1FF) & ~0x1FF) #ifndef RLE_THREAD_BLOCK #define RLE_THREAD_BLOCK 128 #endif #ifndef RLE_ELEMS_PER_THREAD #define RLE_ELEMS_PER_THREAD 4 #endif #define RLE_ELEMS_PER_BLOCK (RLE_THREAD_BLOCK * RLE_ELEMS_PER_THREAD) namespace nvcomp { // internal representations: one kernel per scheme enum nvcompScheme_t { NVCOMP_SCHEME_BP, NVCOMP_SCHEME_RLE, NVCOMP_SCHEME_DELTA, NVCOMP_SCHEME_RLE_DELTA, // automatically fused RLE+Delta to reduce mem // traffic }; struct nvcompLayer_t; struct nvcompDataNode_t { void* ptr; nvcompType_t type; int packing; nvcompLayer_t* parentLayer; size_t length; // to enable BP as a separate layer, default -1 int pointToId; }; struct nvcompLayer_t { nvcompScheme_t scheme; size_t maxOutputSize; nvcompDataNode_t* vals; nvcompDataNode_t* runs; nvcompDataNode_t* output; // TODO: can we get rid of those int valId; int runId; int outputId; }; struct nvcompIntConfig_t { int outputId = 0; nvcompType_t outputType = NVCOMP_TYPE_INT; size_t maxOutputSize = 0; std::list<nvcompLayer_t> layers = {}; std::map<int, nvcompDataNode_t> nodes = {}; // TODO: should we make this nvcompData_t instead of int? // compute the workspace size size_t getWorkspaceBytes(); size_t getWorkspaceBytes(nvcompDataNode_t* node); // fuse kernels, etc. 
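  // (optimizeLayers() below merges a Delta layer whose input is produced by an
  //  RLE layer into a single fused RLE_DELTA layer, so both transforms are
  //  expanded in one kernel pass)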
void optimizeLayers(); }; struct nvcompIntTask_t { // TODO: add CUDA event assigned to this task }; struct nvcompIntHandle_t { nvcompIntConfig_t* config = nullptr; cudaStream_t stream = 0; // main decomp functions template <typename outputT> nvcompError_t decompCPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers); template <typename outputT, typename runT> nvcompError_t decompGPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers, cudaStream_t stream); // workspace memory size_t workspaceBytes = 0; void* workspaceStorage = nullptr; // workspace mem management nvcompError_t release(); nvcompError_t allocateAsync(); // new function that splits of pre-allocated memory // workspace breakdown size_t max_input_len = 0; // maximum input RLE length size_t max_output_len = 0; // maximum output RLE length void* temp_val = nullptr; // temp RLE val expansions void* temp_run = nullptr; // temp RLE run expansions void* temp_delta = nullptr; // temp Delta expansions void* temp_output = nullptr; // temp Delta expansions // cub scan memory size_t temp_scan_bytes = 0; void* temp_scan = nullptr; // block indices start and offsets size_t max_num_blocks = 0; size_t* start_ind = nullptr; size_t* start_off = nullptr; }; template <typename keyT, typename valueT> struct SharedMap { std::map<keyT, valueT> data = {}; std::mutex m = {}; // find the next available id keyT find_next() { std::lock_guard<std::mutex> guard(m); int id = 0; while (data.find(id) != data.end()) id++; return (keyT)id; } bool exists(const keyT& key) { std::lock_guard<std::mutex> guard(m); return data.find(key) != data.end(); } void insert(const keyT& key, const valueT& val) { std::lock_guard<std::mutex> guard(m); if (data.find(key) == data.end()) data[key] = val; } valueT& operator[](const keyT& key) { std::lock_guard<std::mutex> guard(m); return data[key]; } void erase(const keyT& key) { std::lock_guard<std::mutex> guard(m); data.erase(key); } }; // internal collections SharedMap<nvcompConfig_t, nvcompIntConfig_t> configs; SharedMap<nvcompHandle_t, nvcompIntHandle_t> handles; // TODO: can we get rid of these? 
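// module-level locks for the shared collections above; handle_mutex is taken
// in nvcompCreateHandleAsync() when allocating a new handle id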
std::mutex config_mutex; std::mutex handle_mutex; namespace { template <typename T> void cubDeviceScanTempSpace(size_t& temp_scan_bytes, const size_t max_input_len) { void* temp_scan = nullptr; T* temp_run = nullptr; cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, temp_run, temp_run, max_input_len); } void checkCompressSize(const size_t numBytes) { const size_t maxBytes = static_cast<size_t>(std::numeric_limits<int>::max()); if (numBytes > maxBytes) { throw std::runtime_error( "Cascaded compression can only compress up to a maximum of " + std::to_string(maxBytes) + " bytes at a time (requested " + std::to_string(numBytes) + " bytes)."); } } nvcompIntConfig_t generateConfig(const CascadedMetadata* const metadata) { const int numRLEs = metadata->getNumRLEs(); const int numDeltas = metadata->getNumDeltas(); const bool bitPacking = metadata->useBitPacking(); int vals_id = 0; // initialize config const nvcompType_t type = metadata->getValueType(); nvcompIntConfig_t config; config.outputId = vals_id; config.outputType = type; config.maxOutputSize = metadata->getUncompressedSize(); const nvcompType_t runType = selectRunsType(metadata->getNumUncompressedElements()); const size_t maxSegmentSize = metadata->getUncompressedSize(); config.nodes[0].length = metadata->getNumUncompressedElements(); // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = std::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // add to config nvcompConfigAddRLE_BP( &config, inputId, maxSegmentSize, valId, type, bitPacking, runId, runType, bitPacking); config.nodes[valId].length = metadata->getNumElementsOf(valId); config.nodes[runId].length = metadata->getNumElementsOf(runId); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( &config, valId, maxSegmentSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( &config, valId, maxSegmentSize, deltaId, type, 0); // no bitpacking when delta is used as an intermediate step } config.nodes[deltaId].length = metadata->getNumElementsOf(deltaId); } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( &config, inputId, maxSegmentSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( &config, inputId, maxSegmentSize, deltaId, type, 0); // no bitpacking when delta is used as an intermediate step } config.nodes[deltaId].length = metadata->getNumElementsOf(deltaId); } } // If there are no RLEs or Deltas, we will do a single BP step. 
if (numRLEs == 0 && numDeltas == 0) { const int inputId = vals_id; const int bpId = ++vals_id; nvcompConfigAddBP(&config, inputId, maxSegmentSize, bpId, type); config.nodes[bpId].length = metadata->getNumElementsOf(bpId); } return config; } template <typename T> constexpr bool isFixedWidth() { return std::is_same<T, char>::value || std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int16_t>::value || std::is_same<T, uint16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, uint64_t>::value; } template <typename T> size_t writeFixedWidthData( const T* const val, void* const ptr, const size_t offset, const size_t maxSize) { assert(isFixedWidth<T>()); size_t newOffset = offset + sizeof(*val); if (ptr) { // only write if we're doing a really output if (newOffset > maxSize) { throw std::runtime_error( "Not enough room to write member, need at least " + std::to_string(newOffset) + " bytes, but given only " + std::to_string(maxSize)); } memcpy(static_cast<char*>(ptr) + offset, val, sizeof(*val)); } return newOffset; } template <typename T> size_t writeData( const T* const val, void* const ptr, const size_t offset, const size_t maxSize) { if (isFixedWidth<T>()) { return writeFixedWidthData(val, ptr, offset, maxSize); } else if (std::is_same<T, bool>::value) { const int8_t typedVal = static_cast<int8_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else if (std::is_same<T, int>::value) { // on most systems this will not be used, as int32_t is usually defined as // int const int32_t typedVal = static_cast<int32_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else if (std::is_same<T, size_t>::value) { const uint64_t typedVal = static_cast<uint64_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else { throw std::runtime_error("Unsupported type for serialization."); } } } // namespace /************************************************************************************** * Older API definitions below. New API calls rely on them. 
**************************************************************************************/ nvcompIntConfig_t* createConfig(const CascadedMetadata* const metadata) { nvcompIntConfig_t* config = new nvcompIntConfig_t(); *config = generateConfig(metadata); return config; } void destroyConfig(nvcompIntConfig_t* config) { delete config; } nvcompError_t nvcompConfigAddRLE_BP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType, int valPacking, int runId, nvcompType_t runType, int runPacking) { nvcompIntConfig_t& c = *config; // setup input nodes if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, valPacking, NULL, 0, 0}; } if (c.nodes.find(runId) == c.nodes.end()) { c.nodes[runId] = {NULL, runType, runPacking, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = {NVCOMP_SCHEME_RLE, maxOutputSize, NULL, NULL, NULL, valId, runId, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } nvcompError_t nvcompConfigAddDelta_BP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType, int valPacking) { nvcompIntConfig_t& c = *config; // setup the input node if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, valPacking, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = {NVCOMP_SCHEME_DELTA, maxOutputSize, NULL, NULL, NULL, valId, -1, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } nvcompError_t nvcompConfigAddBP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType) { nvcompIntConfig_t& c = *config; // setup the input node if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, 1, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = { NVCOMP_SCHEME_BP, maxOutputSize, NULL, NULL, NULL, valId, -1, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } size_t nvcompIntConfig_t::getWorkspaceBytes(nvcompDataNode_t* /*node*/) { // TODO: allocate output buffers for each node except the terminal one // currently this is done inside decompGPU which will break concurrency (once // we add streams) return 0; } size_t nvcompIntConfig_t::getWorkspaceBytes() { if (nodes.find(outputId) == nodes.end()) { throw std::runtime_error( "getWorkspaceBytes(): could not find output ID amongst nodes: " + std::to_string(outputId) + " with " + std::to_string(nodes.size()) + " nodes."); } if (nodes[outputId].parentLayer == NULL) { throw std::runtime_error("getWorkspaceBytes(): the output node is not used " "by any compression layers."); } int numRLEs = 0; int numDeltas = 0; size_t max_input_len = 0; for (const nvcompLayer_t& layer : layers) { if (layer.scheme == NVCOMP_SCHEME_RLE || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numRLEs; } if (layer.scheme == NVCOMP_SCHEME_DELTA || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numDeltas; } const size_t layer_len = nodes[layer.valId].length; if (layer_len > max_input_len) { max_input_len = layer_len; 
} } const size_t max_output_len = maxOutputSize; size_t size = 0; // temp vals, runs, delta, output if (numRLEs > 0 || numDeltas > 0) { size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); if (numRLEs > 0) { size += CUDA_MEM_ALIGN( max_input_len * sizeOfnvcompType(selectRunsType(maxOutputSize))); } size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); } size_t temp_scan_bytes_run = 0; size_t temp_scan_bytes_delta = 0; NVCOMP_TYPE_ONE_SWITCH( selectRunsType(max_output_len), cubDeviceScanTempSpace, temp_scan_bytes_run, max_input_len); NVCOMP_TYPE_ONE_SWITCH( outputType, cubDeviceScanTempSpace, temp_scan_bytes_delta, max_input_len); size_t temp_scan_bytes = std::max(temp_scan_bytes_run, temp_scan_bytes_delta); size += CUDA_MEM_ALIGN(temp_scan_bytes); size_t max_num_blocks = (max_output_len + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; size += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); size += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); return size; } nvcompError_t nvcompIntHandle_t::release() { return nvcompSuccess; } // recursively assign memory for all nodes in our DAG // ** Assumes worspaceStorage is already allocated with sufficient space ** nvcompError_t nvcompIntHandle_t::allocateAsync() { nvcompIntConfig_t& c = *config; nvcompType_t outputType = c.outputType; // assign member variables for size max_output_len = c.maxOutputSize; max_input_len = 0; int numRLEs = 0; int numDeltas = 0; for (const nvcompLayer_t& layer : c.layers) { if (layer.scheme == NVCOMP_SCHEME_RLE || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numRLEs; } if (layer.scheme == NVCOMP_SCHEME_DELTA || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numDeltas; } const size_t layer_len = c.nodes[layer.valId].length; if (layer_len > max_input_len) { max_input_len = layer_len; } } unsigned char* ptr = (unsigned char*)workspaceStorage; // temporary buffers that can hold RLE expansions and other data, but we will // re-use locations if (numRLEs > 0 || numDeltas > 0) { temp_val = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); if (numRLEs > 0) { temp_run = ptr; ptr += CUDA_MEM_ALIGN( max_input_len * sizeOfnvcompType(selectRunsType(max_output_len))); } temp_delta = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); // one additional buffer for delta expansion // TODO: can we get rid of this one? 
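  // note: decompGPU() swaps temp_output and temp_val at each recursion level,
  // so both buffers are sized to hold a full intermediate expansion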
temp_output = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); } // allocate temp storage for cub scan using the largest size_t // this temp storage will be reused by delta and runs scans of different types temp_scan = ptr; size_t temp_scan_bytes_run = 0; size_t temp_scan_bytes_delta = 0; NVCOMP_TYPE_ONE_SWITCH( selectRunsType(max_output_len), cubDeviceScanTempSpace, temp_scan_bytes_run, max_input_len); NVCOMP_TYPE_ONE_SWITCH( outputType, cubDeviceScanTempSpace, temp_scan_bytes_delta, max_input_len); temp_scan_bytes = std::max(temp_scan_bytes_run, temp_scan_bytes_delta); ptr += CUDA_MEM_ALIGN(temp_scan_bytes); // block indices/offsets max_num_blocks = (max_output_len + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; start_ind = (size_t*)ptr; ptr += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); start_off = (size_t*)ptr; ptr += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); return nvcompSuccess; } // here we do kernel fusion void nvcompIntConfig_t::optimizeLayers() { for (auto it = layers.begin(); it != layers.end();) { if (it->scheme == NVCOMP_SCHEME_DELTA) { int valId = it->valId; int outputId = it->outputId; if (nodes.find(valId) != nodes.end() && nodes[valId].parentLayer != NULL && nodes[valId].parentLayer->scheme == NVCOMP_SCHEME_RLE) { nodes[outputId].parentLayer = nodes[valId].parentLayer; nodes[outputId].parentLayer->scheme = NVCOMP_SCHEME_RLE_DELTA; nodes[outputId].parentLayer->outputId = outputId; it = layers.erase(it); continue; } } it++; } } /* These functions may not be needed and removed to simplify codebase */ nvcompError_t nvcompSetWorkspace( nvcompHandle_t /*handle*/, void* /*workspaceStorage*/, size_t /*workspaceBytes*/) { std::cerr << "ERROR: nvcompSetWorkspace is not implemented yet!" << std::endl; return nvcompErrorNotSupported; } nvcompError_t nvcompGetWorkspaceSize(nvcompHandle_t handle, size_t* workspaceBytes) { *workspaceBytes = handles[handle].workspaceBytes; return nvcompSuccess; } nvcompError_t nvcompSetStream(nvcompHandle_t handle, cudaStream_t streamId) { handles[handle].stream = streamId; return nvcompSuccess; } nvcompError_t nvcompGetStream(nvcompHandle_t handle, cudaStream_t* streamId) { *streamId = handles[handle].stream; return nvcompSuccess; } // if the header is not packed this will shallow copy the pointer // otherwise unpack into the output buffer template <typename inputT, typename outputT> void unpackCpu( outputT** output, nvcompDataNode_t* node, const void* hdr, const void* data) { const CascadedMetadata::Header header = *static_cast<const CascadedMetadata::Header*>(hdr); if (node->packing) { for (size_t i = 0; i < header.length; ++i) { const inputT minValue = *CascadedMetadata::getMinValueLocation<inputT>(&header); (*output)[i] = unpackBytes(data, header.numBits, minValue, i); } } else { if (typeid(inputT) == typeid(outputT)) { *output = (outputT*)data; } else { for (size_t i = 0; i < header.length; i++) (*output)[i] = (outputT)((inputT*)data)[i]; } } } template <typename outputT> void unpackCpu( outputT** output, nvcompDataNode_t* node, const void* hdr, const void* data) { NVCOMP_TYPE_TWO_SWITCH_FIRST_ONLY( node->type, outputT, unpackCpu, output, node, hdr, data); } // if the header is not packed this will shallow copy the pointer if it's // accessible from the GPU otherwise copy or unpack into the output buffer template <typename inputT, typename outputT> void unpackGpu( outputT* d_output, nvcompDataNode_t* node, const void* data, const void* h_hdr, cudaStream_t stream) { void* d_input = NULL; // prepare 
input data cudaPointerAttributes attr; cudaError_t err = cudaPointerGetAttributes(&attr, data); if (err != cudaSuccess) { std::ostringstream oss; oss << data; throw std::runtime_error( "unpackGpu(): Failed to get pointer attributes for " + oss.str() + " due to: " + std::to_string(err)); } if (attr.type != cudaMemoryTypeUnregistered) { // memory is accessible to the GPU d_input = attr.devicePointer; } else { throw std::runtime_error("unpackGpu(): Data not accessible to the GPU"); } // Get length of run from the host-side header size_t length = static_cast<const CascadedMetadata::Header*>(h_hdr)->length; CascadedMetadata::Header header = *static_cast<const CascadedMetadata::Header*>(h_hdr); const unsigned char numBits = header.numBits; const inputT minValue = *CascadedMetadata::getMinValueLocation<inputT>(&header); const dim3 block(512); const dim3 grid(roundUpDiv(length, block.x)); if (node->packing) { unpackBytesKernel<<<grid, block, 0, stream>>>( d_input, d_output, numBits, minValue, length); } else { convertKernel<<<grid, block, 0, stream>>>( static_cast<const inputT*>(d_input), d_output, length); } } template <typename outputT> void unpackGpu( outputT* d_output, nvcompDataNode_t* node, const void* data, const void* h_hdr, cudaStream_t stream) { NVCOMP_TYPE_TWO_SWITCH_FIRST_ONLY( node->type, outputT, unpackGpu, d_output, node, data, h_hdr, stream); } template <typename outputT> nvcompError_t nvcompIntHandle_t::decompCPU( nvcompDataNode_t* node, const void** inputHdrs, const void** inputData) { size_t maxOutputSize = config->maxOutputSize; std::vector<outputT> unpacked_vals; std::vector<size_t> unpacked_runs; outputT* vals_data = NULL; size_t* runs_data = NULL; size_t vals_len; nvcompLayer_t* layer = node->parentLayer; // add BP only layer if (layer->scheme == NVCOMP_SCHEME_BP) { unpacked_vals.resize(maxOutputSize); vals_data = &unpacked_vals[0]; unpackCpu( &vals_data, layer->vals, inputHdrs[layer->valId], inputData[layer->valId]); vals_len = static_cast<const CascadedMetadata::Header*>(inputHdrs[layer->valId]) ->length; node->length = vals_len; // lazy allocation // TODO: move to allocate() if (node->ptr == NULL) node->ptr = new outputT[vals_len]; // copy and convert type if necessary for (int i = 0; i < vals_len; i++) { ((outputT*)(node->ptr))[i] = vals_data[i]; } return nvcompSuccess; } // compute vals if (layer->vals->parentLayer != NULL) { decompCPU<outputT>(layer->vals, inputHdrs, inputData); vals_data = (outputT*)layer->vals->ptr; vals_len = layer->vals->length; } else { unpacked_vals.resize(maxOutputSize); vals_data = &unpacked_vals[0]; unpackCpu( &vals_data, layer->vals, inputHdrs[layer->valId], inputData[layer->valId]); vals_len = static_cast<const CascadedMetadata::Header*>(inputHdrs[layer->valId]) ->length; } // compute runs if (layer->runs != NULL) { if (layer->runs->parentLayer != NULL) { decompCPU<size_t>(layer->runs, inputHdrs, inputData); runs_data = (size_t*)layer->runs->ptr; } else { unpacked_runs.resize(maxOutputSize); runs_data = &unpacked_runs[0]; unpackCpu( &runs_data, layer->runs, inputHdrs[layer->runId], inputData[layer->runId]); } } // decompress (this is using additional memory) std::vector<outputT> next; next.clear(); switch (layer->scheme) { case NVCOMP_SCHEME_RLE: { for (int i = 0; i < vals_len; i++) next.insert(next.end(), runs_data[i], vals_data[i]); break; } case NVCOMP_SCHEME_RLE_DELTA: { for (int i = 0; i < vals_len; i++) next.insert(next.end(), runs_data[i], vals_data[i]); for (int i = 1; i < next.size(); i++) next[i] += next[i - 1]; break; } case 
NVCOMP_SCHEME_DELTA: { next.resize(vals_len); next[0] = vals_data[0]; for (int i = 1; i < vals_len; i++) next[i] = next[i - 1] + vals_data[i]; break; } default: return nvcompErrorNotSupported; } node->length = next.size(); // lazy allocation // TODO: move to allocate() if (node->ptr == NULL) node->ptr = new outputT[next.size()]; // copy and convert type if necessary for (int i = 0; i < next.size(); i++) ((outputT*)(node->ptr))[i] = next[i]; return nvcompSuccess; } // Perform Cascaded decompression on the GPU. // Assumes all workspace is pre-allocated and assigned, inputHdrs and inputData // are GPU-accessible, and h_headers is CPU-accessible template <typename outputT, typename runT> nvcompError_t nvcompIntHandle_t::decompGPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers, cudaStream_t stream = NULL) { // prepare device output buffer if necessary // TODO: move to the init step cudaPointerAttributes attr; outputT* out_ptr = NULL; // get typed copies of pointers to avoid casting outputT* const localOutput = static_cast<outputT*>(temp_output); outputT* const localDelta = static_cast<outputT*>(temp_delta); runT* const localRun = static_cast<runT*>(temp_run); if (node->ptr == nullptr) { throw std::runtime_error( "nvcompIntHandle_t::decompGPU(): Got node with null ptr."); } cudaError_t err = cudaPointerGetAttributes(&attr, node->ptr); if (err != cudaSuccess) { throw std::runtime_error( "nvcompIntHandle_t::decompGPU(): Failed to get cuda pointer " "attributes: " + std::to_string(err)); } if (attr.type != cudaMemoryTypeUnregistered) { // direct access is possible out_ptr = (outputT*)attr.devicePointer; } else { throw std::runtime_error("nvcompIntHandle_t::decompGPU(): Workspace memory " "not accessible to GPU."); } nvcompLayer_t* layer = node->parentLayer; if (layer->scheme == NVCOMP_SCHEME_BP) { // We assume this is the only layer, and we just do it and exit layer->vals->ptr = out_ptr; unpackGpu( (outputT*)layer->vals->ptr, layer->vals, inputData[layer->valId], h_headers[layer->valId], stream); layer->vals->length = static_cast<const CascadedMetadata::Header*>(h_headers[layer->valId]) ->length; assert(layer->vals->length <= max_input_len); return nvcompSuccess; } // prepare inputs std::swap(temp_output, temp_val); if (layer->vals->parentLayer != NULL) { layer->vals->ptr = localOutput; // when recursing, swap decompGPU<outputT, runT>(layer->vals, inputData, h_headers, stream); } else { // unpack RLE values layer->vals->ptr = localOutput; unpackGpu( (outputT*)layer->vals->ptr, layer->vals, inputData[layer->valId], h_headers[layer->valId], stream); layer->vals->length = static_cast<const CascadedMetadata::Header*>(h_headers[layer->valId]) ->length; assert(layer->vals->length <= max_input_len); } if (layer->runs != nullptr) { if (layer->runs->parentLayer != nullptr) { throw std::runtime_error("decompGPU(): Runs cannot have parent layers."); } else { // unpack RLE runs layer->runs->ptr = localRun; unpackGpu( (runT*)layer->runs->ptr, layer->runs, inputData[layer->runId], h_headers[layer->runId], stream); layer->runs->length = static_cast<const CascadedMetadata::Header*>( h_headers[layer->runId]) ->length; } } outputT* d_vals = (outputT*)layer->vals->ptr; const size_t input_size = layer->vals->length; assert(input_size <= max_input_len); if (layer->scheme == NVCOMP_SCHEME_DELTA) { assert(out_ptr != d_vals); cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, d_vals, out_ptr, input_size, stream); } else { // must be RLE of some form runT* d_runs = (runT*)layer->runs->ptr; 
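    // RLE expansion (kernels below): for RLE_DELTA, per-run delta partial sums
    // are built first with vecMultKernel and an inclusive scan; the run lengths
    // are then scanned into absolute output offsets, each thread block's
    // starting run is located with searchBlockBoundaries, and expandRLEDelta
    // writes the expanded (and, for RLE_DELTA, delta-decoded) output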
assert(layer->runs->length == input_size); if (layer->scheme == NVCOMP_SCHEME_RLE_DELTA) { const dim3 block(512); const dim3 grid(roundUpDiv(input_size, block.x)); vecMultKernel<<<grid, block, 0, stream>>>( d_vals, d_runs, localDelta, input_size); // inclusive scan to compute Delta sums cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, localDelta, localDelta, input_size, stream); } // inclusive scan to compute RLE offsets // TODO: could be merged with the unpack kernel? cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, d_runs, d_runs, input_size, stream); const size_t output_length = node->length; // precompute start/end boundaries for each CUDA block size_t output_grid = (output_length + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; size_t output_grid_block = (output_grid + RLE_THREAD_BLOCK - 1) / RLE_THREAD_BLOCK; searchBlockBoundaries<runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD> <<<output_grid_block, RLE_THREAD_BLOCK, 0, stream>>>( start_ind, start_off, output_grid, input_size, d_runs); // expand RLE and apply Delta: buf[r] -> buf[r+1] // TODO: implement macro to look nicer? switch (layer->scheme) { case NVCOMP_SCHEME_RLE_DELTA: expandRLEDelta< outputT, outputT, runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD, true><<<output_grid, RLE_THREAD_BLOCK, 0, stream>>>( (outputT*)out_ptr, output_length, d_vals, d_runs, localDelta, start_ind, start_off); break; case NVCOMP_SCHEME_RLE: expandRLEDelta< outputT, outputT, runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD, false><<<output_grid, RLE_THREAD_BLOCK, 0, stream>>>( (outputT*)out_ptr, output_length, d_vals, d_runs, localDelta, start_ind, start_off); break; default: throw std::runtime_error( "Invalid rle scheme: " + std::to_string(layer->scheme)); } } return nvcompSuccess; } nvcompError_t nvcompSetNodeLength(nvcompHandle_t handle, int nodeId, size_t output_length) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; c.nodes[nodeId].length = output_length; return nvcompSuccess; } // Main function that sets up Cascaded decompression from the old API. // the new cascaded decompression API call is just a wrapper around this (though // heavily modified to be asynchronous). template <typename outputType, typename runType> nvcompError_t nvcompDecompressLaunch( nvcompHandle_t handle, void* outputData, const size_t outputSize, const void** inputData, const void** h_headers) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; // TODO: assign all the buffers nvcompDataNode_t* terminal_node = &c.nodes[c.outputId]; terminal_node->ptr = outputData; nvcompError_t ret = h.decompGPU<outputType, runType>( terminal_node, inputData, h_headers, h.stream); const size_t neededBytes = terminal_node->length * sizeof(outputType); if (outputSize < neededBytes) { std::cerr << "Insufficient space to write decompressed date: given " << outputSize << " bytes but need " << neededBytes << " bytes." 
              << std::endl;
    return nvcompErrorInvalidValue;
  }

  // this is to enable the correct result for multi-chunk execution
  for (auto it = c.nodes.begin(); it != c.nodes.end(); it++) {
    it->second.length = 0;
  }

  return ret;
}

nvcompError_t nvcompDecompressLaunch(
    nvcompHandle_t handle,
    const size_t numUncompressedElements,
    void* const outputData,
    const size_t outputSize,
    const void** const inputData,
    const void** const h_headers)
{
  const nvcompType_t outputType = handles[handle].config->outputType;
  const nvcompType_t runType = selectRunsType(numUncompressedElements);

  NVCOMP_TYPE_TWO_SWITCH_RETURN(
      outputType,
      runType,
      nvcompDecompressLaunch,
      handle,
      outputData,
      outputSize,
      inputData,
      h_headers);
}

nvcompError_t nvcompDestroyHandle(nvcompHandle_t handle)
{
  nvcompIntHandle_t& h = handles[handle];
  nvcompIntConfig_t& c = *h.config;

  // free temp memory
  h.release();

  // clear all local nodes attached to this config
  c.nodes.clear();

  // remove the handle from the list
  handles.erase(handle);

  return nvcompSuccess;
}

// Modified version of the handle creation function from the previous API, now
// asynchronous. Assumes workspaceStorage is already allocated.
nvcompError_t nvcompCreateHandleAsync(
    nvcompHandle_t* handle,
    nvcompIntConfig_t* const config,
    void* workspaceStorage,
    const size_t workspaceBytes,
    cudaStream_t stream)
{
  std::lock_guard<std::mutex> guard(handle_mutex);

  nvcompIntConfig_t& c = *config;

  // first - optimize the plan
  c.optimizeLayers();

  // assign pointers - at this point the nodes map is set
  for (auto it = c.layers.begin(); it != c.layers.end(); it++) {
    it->vals = &c.nodes[it->valId];
    it->output = &c.nodes[it->outputId];
    if (it->runId >= 0)
      it->runs = &c.nodes[it->runId];
  }

  if (workspaceBytes < c.getWorkspaceBytes()) {
    std::cerr << "Insufficient workspace size: got " << workspaceBytes
              << " but need " << c.getWorkspaceBytes() << std::endl;
    return nvcompErrorInvalidValue;
  }

  // find the next available id
  nvcompHandle_t id = handles.find_next();
  *handle = id;

  nvcompIntHandle_t& h = handles[id];
  h.config = config;
  h.stream = stream;
  h.workspaceBytes = workspaceBytes;
  h.workspaceStorage = workspaceStorage;
  h.allocateAsync();

  return nvcompSuccess;
}

} // namespace nvcomp

using namespace nvcomp;

nvcompError_t nvcompCascadedDecompressGetMetadata(
    const void* in_ptr,
    const size_t in_bytes,
    void** metadata_ptr,
    cudaStream_t stream)
{
  try {
    CHECK_NOT_NULL(in_ptr);
    CHECK_NOT_NULL(metadata_ptr);

    CascadedMetadataOnGPU gpuMetadata((void*)in_ptr, in_bytes);
    *metadata_ptr = new CascadedMetadata(gpuMetadata.copyToHost());

    CudaUtils::sync(stream);
  } catch (const std::exception& e) {
    return Check::exception_to_error(
        e, "nvcompCascadedDecompressGetMetadata()");
  }

  return nvcompSuccess;
}

void nvcompCascadedDecompressDestroyMetadata(void* const metadata_ptr)
{
  CascadedMetadata* metadata = static_cast<CascadedMetadata*>(metadata_ptr);
  ::operator delete(metadata);
}

// TODO: improve estimate with a more sophisticated approach.
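// Illustrative caller sequence for the decompression entry points defined
// below (a sketch only: variable names are placeholders, allocation of the
// temp/output buffers and error checking are omitted):
//
//   void* metadata = nullptr;
//   nvcompCascadedDecompressGetMetadata(comp_ptr, comp_bytes, &metadata, stream);
//
//   size_t temp_bytes, out_bytes;
//   nvcompCascadedDecompressGetTempSize(metadata, &temp_bytes);
//   nvcompCascadedDecompressGetOutputSize(metadata, &out_bytes);
//   // ... allocate temp_ptr / out_ptr on the device ...
//
//   nvcompCascadedDecompressAsync(comp_ptr, comp_bytes, temp_ptr, temp_bytes,
//                                 metadata, out_ptr, out_bytes, stream);
//   nvcompCascadedDecompressDestroyMetadata(metadata);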
nvcompError_t nvcompCascadedDecompressGetTempSize( const void* metadata_ptr, size_t* temp_bytes) { try { CHECK_NOT_NULL(metadata_ptr); CHECK_NOT_NULL(temp_bytes); CascadedMetadata* metadata = (CascadedMetadata*)metadata_ptr; nvcompIntConfig_t c = generateConfig(metadata); // first - optimize the plan c.optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto it = c.layers.begin(); it != c.layers.end(); it++) { it->vals = &c.nodes[it->valId]; it->output = &c.nodes[it->outputId]; if (it->runId >= 0) { it->runs = &c.nodes[it->runId]; } } // Return the required temp storage size *temp_bytes = c.getWorkspaceBytes(); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedDecompressGetTempSize()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedDecompressGetOutputSize( const void* metadata_ptr, size_t* output_bytes) { try { *output_bytes = static_cast<const CascadedMetadata*>(metadata_ptr) ->getUncompressedSize(); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedDecompressionGetOutputSize()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedDecompressAsync( const void* const in_ptr, const size_t in_bytes, void* const temp_ptr, const size_t temp_bytes, const void* const metadata_ptr, void* const out_ptr, const size_t out_bytes, cudaStream_t stream) { nvcompHandle_t handle = -1; try { CHECK_NOT_NULL(metadata_ptr); const CascadedMetadata* const metadata = static_cast<const CascadedMetadata*>(metadata_ptr); if (in_bytes < metadata->getCompressedSize()) { throw NVCompException( nvcompErrorInvalidValue, "in_bytes is smaller than compressed data size: " + std::to_string(in_bytes) + " < " + std::to_string(metadata->getCompressedSize())); } nvcompIntConfig_t c = generateConfig(metadata); // first - optimize the plan c.optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto it = c.layers.begin(); it != c.layers.end(); it++) { it->vals = &c.nodes[it->valId]; it->output = &c.nodes[it->outputId]; if (it->runId >= 0) { it->runs = &c.nodes[it->runId]; } } CHECK_API_CALL( nvcompCreateHandleAsync(&handle, &c, temp_ptr, temp_bytes, stream)); assert(handle >= 0); // Pointers to different portions of compressed data std::vector<void*> inputData(metadata->getNumInputs(), nullptr); std::vector<CascadedMetadata::Header> inputHdrs; std::vector<CascadedMetadata::Header*> cpuHdrs; for (size_t i = 0; i < metadata->getNumInputs(); i++) { inputHdrs.emplace_back(metadata->getHeader(i)); inputData[i] = &((char*)in_ptr)[metadata->getDataOffset(i)]; } for (CascadedMetadata::Header& hdr : inputHdrs) { cpuHdrs.emplace_back(&hdr); } nvcompDecompressLaunch( handle, metadata->getNumUncompressedElements(), out_ptr, out_bytes, (const void**)inputData.data(), (const void**)cpuHdrs.data()); nvcompDestroyHandle(handle); } catch (const std::exception& e) { if (handle >= 0) { nvcompDestroyHandle(handle); } return Check::exception_to_error(e, "nvcompCascadedDecompressAsync()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedCompressGetTempSize( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const format_opts, size_t* const temp_bytes) { try { checkCompressSize(in_bytes); nvcompCascadedCompressionGPU::computeWorkspaceSize( in_ptr, in_bytes, in_type, format_opts, temp_bytes); } catch (const std::exception& e) { return Check::exception_to_error(e, "nvcompCascadedCompressGetTempSize()"); } return nvcompSuccess; } nvcompError_t 
nvcompCascadedCompressGetOutputSize( const void* in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* format_opts, void* const temp_ptr, const size_t temp_bytes, size_t* const out_bytes, const int exact_out_bytes) { try { checkCompressSize(in_bytes); if (exact_out_bytes) { throw std::runtime_error("Exact output bytes is unimplemented at " "this time."); } nvcompCascadedCompressionGPU::generateOutputUpperBound( in_ptr, in_bytes, in_type, format_opts, temp_ptr, temp_bytes, out_bytes); } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedCompressGetOutputSize()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedCompressAsync( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, const nvcompCascadedFormatOpts* const format_opts, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, cudaStream_t stream) { try { checkCompressSize(in_bytes); CHECK_NOT_NULL(out_bytes); if (*out_bytes == 0) { throw NVCompException( nvcompErrorInvalidValue, "Output size cannot be zero. Make sure " "to set the size of out_bytes to size of output space allocated " "for compressed output."); } nvcompCascadedCompressionGPU::compressAsync( in_ptr, in_bytes, in_type, format_opts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } catch (const std::exception& e) { return Check::exception_to_error(e, "nvcompCascadedCompressAsync()"); } return nvcompSuccess; } /***************************************************************************** * Definitions of API calls for automatically selected compression ****************************************************************************/ nvcompError_t nvcompCascadedCompressAutoGetTempSize( const void* const in_ptr, const size_t in_bytes, const nvcompType_t in_type, size_t* const temp_bytes) { // Assume the scheme that requires the most temp space nvcompCascadedFormatOpts biggest_opts; biggest_opts.num_RLEs = 2; biggest_opts.num_deltas = 2; biggest_opts.use_bp = 1; return API_WRAPPER(nvcompCascadedCompressGetTempSize( in_ptr, in_bytes, in_type, &biggest_opts, temp_bytes), "nvcompCascadedCompressAutoGetTempSize()"); } nvcompError_t nvcompCascadedCompressAutoGetOutputSize( const void* in_ptr, size_t in_bytes, nvcompType_t in_type, void* temp_ptr, size_t temp_bytes, size_t* out_bytes) { // Assume the scheme that can result in the largest output nvcompCascadedFormatOpts biggest_opts; biggest_opts.num_RLEs = 2; biggest_opts.num_deltas = 2; biggest_opts.use_bp = 1; return API_WRAPPER(nvcompCascadedCompressGetOutputSize( in_ptr, in_bytes, in_type, &biggest_opts, temp_ptr, temp_bytes, out_bytes, 0), "nvcompCascadedCompressAutoGetOutputSize()"); } nvcompError_t nvcompCascadedCompressAuto( const void* in_ptr, size_t in_bytes, nvcompType_t in_type, void* temp_ptr, size_t temp_bytes, void* out_ptr, size_t* out_bytes, cudaStream_t stream) { try { nvcompCascadedSelectorOpts selector_opts; selector_opts.sample_size = 1024; selector_opts.num_samples = 100; size_t type_bytes = sizeOfnvcompType(in_type); // Adjust sample size if input is too small if(in_bytes < (selector_opts.sample_size * selector_opts.num_samples * type_bytes)) { selector_opts.sample_size = in_bytes / (10*type_bytes); selector_opts.num_samples = 10; } nvcompCascadedFormatOpts format_opts; double est_ratio; // Run selector to get format opts for compression CHECK_API_CALL(nvcompCascadedSelectorSelectConfig( in_ptr, in_bytes, in_type, selector_opts, temp_ptr, temp_bytes, &format_opts, 
      &est_ratio,
      stream));

  CudaUtils::sync(stream);

  // Run compression
  CHECK_API_CALL(nvcompCascadedCompressAsync(
      in_ptr,
      in_bytes,
      in_type,
      &format_opts,
      temp_ptr,
      temp_bytes,
      out_ptr,
      out_bytes,
      stream));

  } catch (const std::exception& e) {
    return Check::exception_to_error(e, "nvcompCascadedCompressAuto()");
  }

  return nvcompSuccess;
}
8db02be9b9dc7e331847ff0eeedf1123a7522dbd.hip
// !!! This is a file automatically generated by hipify!!!
/*
 *
 * Multiplicación de Matrices en CUDA
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <cassert>
#include <time.h>
//PP#include <hip/hip_runtime.h>

#define SHMEM_SIZE 32 * 32 * 4 // guardar en L1 256 float's

/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);

// Kernel de multiplicación de matrices
__global__ void matrix_multiplication(float *d_A, float *d_B, float *d_C, int N, int tile_size)
{
    // memoria compartida
    __shared__ float A[SHMEM_SIZE];
    __shared__ float B[SHMEM_SIZE];

    // indices de hilos y bloques
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // calculando columna y fila
    int row = by * tile_size + ty;
    int col = bx * tile_size + tx;

    // inticailizando suma temporal
    float temp = 0.0;

    // Realizando operaciones
    for (int i = 0; i < (N / tile_size); i++) {
        // cargando memoria compartida con porción de las "matrices"
        A[(ty * tile_size) + tx] = d_A[row * N + (i * tile_size + tx)];
        B[(ty * tile_size) + tx] = d_B[(i * tile_size * N + ty * N) + col];

        // esperando hilos para que todo esté cargado
        __syncthreads();

        // calculando temp
        for (int j = 0; j < tile_size; j++) {
            temp += A[(ty * tile_size + j)] * B[j * tile_size + tx];
        }

        // esperando hilos para evitar que se cargue nueva información antes
        // de que todos los hilos terminen de acceder a la memoria compartida
        __syncthreads();
    }

    d_C[row * N + col] = temp;
}

// Verificando resultado en el CPU
void verify_result(float *A, float *B, float *C, int N)
{
    for (unsigned int i = 0; i < N; i++) {
        for (unsigned int j = 0; j < N; j++) {
            float sum = 0;
            for (unsigned int k = 0; k < N; k++) {
                sum += A[i * N + k] * B[k * N + j];
            }
            // check against GPU result
            assert(sum == C[i * N + j]);
        }
    }
}

// Main routine
int main(int argc, char *argv[])
{
    float *h_A, *h_B, *h_C; // matrices en CPU
    float *d_A, *d_B, *d_C; // matrices en GPU

    if (argc < 2) {
        printf("usage: mul <matrix-dimension-power-2>\n");
        exit(-1);
    }
    if (atoi(argv[1]) < 5) {
        printf("Please provide a dimension higher than 5\n");
    }

    int N = 1 << atoi(argv[1]);             // filas y renglones
    int MTX_SIZE = N * N;                   // matriz de tamaño
    size_t size = MTX_SIZE * sizeof(float); // tamaño de matriz en bytes

    // Reservar memoria en CPU
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);

    // Reservar memoria en GPU
    hipMalloc((void **) &d_A, size);
    hipMalloc((void **) &d_B, size);
    hipMalloc((void **) &d_C, size);

    // inicializando matrices
    for (int i = 0; i < MTX_SIZE; i++) {
        h_A[i] = (float)(rand() % 100);
        h_B[i] = (float)(rand() % 100);
        h_C[i] = (float)0;
    }

    // copiando de CPU a GPU
    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);

    // verificando tiempo de ejecución
    time_t t1, t2;

    // corriendo kernel en el GPU
    int n_threads = 32;
    int n_blocks = N / n_threads;
    dim3 dimBlock(n_threads, n_threads);
    dim3 dimGrid(n_blocks, n_blocks);

    t1 = time(NULL);
    hipLaunchKernelGGL(( matrix_multiplication), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, n_threads);

    // esperando a que acaben los hilos
    hipDeviceSynchronize();
    checkCUDAError("kernel invocation");

    // timing execution
    t2 = time(NULL);
    printf("Execution time: %f sec\n", difftime(t2, t1));

    // copiando resultado de regreso al CPU
    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    checkCUDAError("memcpy");

    // verificando resultado
    // printf("Verifying result in CPU...\n");
    // verify_result(h_A, h_B, h_C, N);
    // printf("Success!\n");

    // Liberar memoria
    free(h_A);
    free(h_B);
    free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}

// Utility function to check for and report CUDA errors
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
8db02be9b9dc7e331847ff0eeedf1123a7522dbd.cu
/*
 * Matrix multiplication in CUDA
 */
#include <stdio.h>
#include <stdlib.h>
#include <cassert>
#include <math.h>
#include <time.h>
//PP#include <cuda.h>

#define SHMEM_SIZE 32 * 32 * 4 // 4096 floats (16 KB) per shared array; only the first 32*32 entries are used

/* Utility to check for CUDA errors */
void checkCUDAError(const char*);

// Matrix multiplication kernel
__global__ void matrix_multiplication(float *d_A, float *d_B, float *d_C, int N, int tile_size)
{
    // shared memory
    __shared__ float A[SHMEM_SIZE];
    __shared__ float B[SHMEM_SIZE];

    // thread and block indices
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // compute row and column
    int row = by * tile_size + ty;
    int col = bx * tile_size + tx;

    // initialize the running sum
    float temp = 0.0;

    // walk over the tiles of the input matrices
    for (int i = 0; i < (N / tile_size); i++) {
        // load one tile of each matrix into shared memory
        A[(ty * tile_size) + tx] = d_A[row * N + (i * tile_size + tx)];
        B[(ty * tile_size) + tx] = d_B[(i * tile_size * N + ty * N) + col];

        // wait for every thread so the whole tile is loaded
        __syncthreads();

        // accumulate the partial dot product for this tile
        for (int j = 0; j < tile_size; j++) {
            temp += A[(ty * tile_size + j)] * B[j * tile_size + tx];
        }

        // wait again so no thread overwrites the tile while
        // others are still reading shared memory
        __syncthreads();
    }

    d_C[row * N + col] = temp;
}

// Verify the result on the CPU
void verify_result(float *A, float *B, float *C, int N)
{
    for (unsigned int i = 0; i < N; i++) {
        for (unsigned int j = 0; j < N; j++) {
            float sum = 0;
            for (unsigned int k = 0; k < N; k++) {
                sum += A[i * N + k] * B[k * N + j];
            }
            // check against the GPU result, allowing for float rounding
            // differences caused by the different summation order
            assert(fabsf(sum - C[i * N + j]) <= 1e-3f * fabsf(sum));
        }
    }
}

// Main routine
int main(int argc, char *argv[])
{
    float *h_A, *h_B, *h_C; // matrices on the CPU
    float *d_A, *d_B, *d_C; // matrices on the GPU

    if (argc < 2) {
        printf("usage: mul <matrix-dimension-power-2>\n");
        exit(-1);
    }
    if (atoi(argv[1]) < 5) {
        printf("Please provide a dimension higher than 5\n");
        exit(-1);
    }

    int N = 1 << atoi(argv[1]);              // rows and columns
    int MTX_SIZE = N * N;                    // number of matrix elements
    size_t size = MTX_SIZE * sizeof(float);  // matrix size in bytes

    // Allocate memory on the CPU
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);

    // Allocate memory on the GPU
    cudaMalloc((void **) &d_A, size);
    cudaMalloc((void **) &d_B, size);
    cudaMalloc((void **) &d_C, size);

    // initialize the matrices
    for (int i = 0; i < MTX_SIZE; i++) {
        h_A[i] = (float)(rand() % 100);
        h_B[i] = (float)(rand() % 100);
        h_C[i] = (float)0;
    }

    // copy from the CPU to the GPU
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // measure execution time
    time_t t1, t2;

    // run the kernel on the GPU
    int n_threads = 32;
    int n_blocks = N / n_threads;
    dim3 dimBlock(n_threads, n_threads);
    dim3 dimGrid(n_blocks, n_blocks);

    t1 = time(NULL);
    matrix_multiplication<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N, n_threads);

    // wait for the kernel to finish
    cudaDeviceSynchronize();
    checkCUDAError("kernel invocation");

    // timing execution
    t2 = time(NULL);
    printf("Execution time: %f sec\n", difftime(t2, t1));

    // copy the result back to the CPU
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");

    // verify the result
    // printf("Verifying result in CPU...\n");
    // verify_result(h_A, h_B, h_C, N);
    // printf("Success!\n");

    // Free memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

// Utility function to check for and report CUDA errors
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
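A note on the timing above: time(NULL) only has one-second resolution, so short kernels will usually report 0 seconds. A common alternative is CUDA events. The sketch below is an illustration only and is not part of the original file; it reuses the matrix_multiplication kernel and mirrors the d_A, d_B, d_C, N, dimGrid, dimBlock and n_threads names from main.

// Sketch only: time one launch of matrix_multiplication with CUDA events.
float time_kernel_ms(float *d_A, float *d_B, float *d_C, int N,
                     dim3 dimGrid, dim3 dimBlock, int n_threads)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    matrix_multiplication<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N, n_threads);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);            // wait for the kernel and the stop event

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop); // elapsed time in milliseconds

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}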
0e5ff9e4aca4ca0ffdd7587e6227a6a6109eaf24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zgeelltmv.cu, normal z -> s, Mon Jun 25 18:24:24 2018 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELL SpMV kernel //Michael Garland template<bool betazero> __global__ void sgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, float alpha, float * dval, magma_index_t * dcolind, float * dx, float beta, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; float val = dval [ num_rows * n + row ]; //if ( val != MAGMA_S_ZERO ) dot += val * dx[col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // shifted ELL SpMV kernel //Michael Garland __global__ void sgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, float alpha, float lambda, float * dval, magma_index_t * dcolind, float * dx, float beta, int offset, int blocksize, magma_index_t * addrows, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; float val = dval [ num_rows * n + row ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( sgeelltmv_kernel<true>), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { hipLaunchKernelGGL(( sgeelltmv_kernel<false>), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] lambda float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float lambda, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; float tmp_shift; //magma_ssetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; hipLaunchKernelGGL(( sgeelltmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
0e5ff9e4aca4ca0ffdd7587e6227a6a6109eaf24.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zgeelltmv.cu, normal z -> s, Mon Jun 25 18:24:24 2018 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 // ELL SpMV kernel //Michael Garland template<bool betazero> __global__ void sgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, float alpha, float * dval, magma_index_t * dcolind, float * dx, float beta, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; float val = dval [ num_rows * n + row ]; //if ( val != MAGMA_S_ZERO ) dot += val * dx[col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // shifted ELL SpMV kernel //Michael Garland __global__ void sgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, float alpha, float lambda, float * dval, magma_index_t * dcolind, float * dx, float beta, int offset, int blocksize, magma_index_t * addrows, float * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; if (row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; float val = dval [ num_rows * n + row ]; if ( val != 0) dot += val * dx[col ]; } if ( row < blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; if (beta == MAGMA_S_ZERO) { sgeelltmv_kernel<true><<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { sgeelltmv_kernel<false><<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha float scalar multiplier @param[in] lambda float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float lambda, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magma_int_t offset, magma_int_t blocksize, magmaIndex_ptr addrows, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; float tmp_shift; //magma_ssetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; sgeelltmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx, beta, offset, blocksize, addrows, dy ); return MAGMA_SUCCESS; }
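For readers unfamiliar with the ELL layout that the kernels above index as dval[num_rows * n + row]: each row stores exactly num_cols_per_row (value, column) slots, laid out slot-major so consecutive threads read consecutive rows. The plain C++ sketch below is an illustration only — it is not MAGMA code, and the small dense matrix is made up — showing how such arrays can be packed and then read back with the same indexing as the kernel.

// Illustration only: build ELL arrays with the dval[num_rows * n + row] layout.
#include <cstdio>
#include <vector>

int main() {
    const int num_rows = 3, num_cols = 3, num_cols_per_row = 2;
    // Dense 3x3 matrix with at most 2 nonzeros per row.
    float dense[3][3] = {{4, 0, 1},
                         {0, 5, 0},
                         {2, 0, 6}};
    std::vector<float> val(num_rows * num_cols_per_row, 0.0f);
    std::vector<int>   colind(num_rows * num_cols_per_row, 0);

    // Pack row by row; slot n of row r lives at index num_rows * n + r,
    // which is exactly the layout sgeelltmv_kernel reads.
    for (int r = 0; r < num_rows; ++r) {
        int slot = 0;
        for (int c = 0; c < num_cols && slot < num_cols_per_row; ++c) {
            if (dense[r][c] != 0.0f) {
                val[num_rows * slot + r]    = dense[r][c];
                colind[num_rows * slot + r] = c;
                ++slot;
            }
        }
    }

    // CPU version of the per-row dot product from the kernel.
    float x[3] = {1, 2, 3}, y[3];
    for (int r = 0; r < num_rows; ++r) {
        float dot = 0.0f;
        for (int n = 0; n < num_cols_per_row; ++n)
            dot += val[num_rows * n + r] * x[colind[num_rows * n + r]];
        y[r] = dot;
    }
    printf("%g %g %g\n", y[0], y[1], y[2]);  // expected: 7 10 20
    return 0;
}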
c4cfc25b46c836453d4d3657bd9d556b77ffdb29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stencil_3d.hpp" #include "helper_math.h" void stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo, hipStream_t stream); __global__ void __stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo); void stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo, hipStream_t stream) { dim3 blockDim(8, 8, 8); dim3 gridDim( (dimx/blockDim.x + ((dimx%blockDim.x)?1:0)), (dimy/blockDim.y + ((dimy%blockDim.y)?1:0)), (dimz/blockDim.z + ((dimz%blockDim.z)?1:0)) ); size_t sharedMemSize = (blockDim.x+2*halo)*(blockDim.y+2*halo)*(blockDim.z+2*halo)*sizeof(float); hipLaunchKernelGGL(( __stencil_3d), dim3(gridDim), dim3(blockDim), sharedMemSize, stream, deviceSrc, deviceDst, dimx, dimy, dimz, halo); } #define at(x, y, z, dimx, dimy, dimz) ( clamp((int)z, 0, dimz-1)*dimy*dimx + \ clamp((int)y, 0, dimy-1)*dimx + \ clamp((int)x, 0, dimx-1) ) __global__ void __stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo) { extern __shared__ float sharedMemSrc[]; int shared_index_1d, global_index_1d, index_1d; int3 shared_index_3d, global_index_3d, index_3d; // Multi batch reading here int3 sharedMemDim = make_int3(blockDim.x+2*halo, blockDim.y+2*halo, blockDim.z+2*halo); int sharedMemSize = sharedMemDim.x*sharedMemDim.y*sharedMemDim.z; int3 blockSizeDim = make_int3(blockDim.x+0*halo, blockDim.y+0*halo, blockDim.z+0*halo); int blockSize = blockSizeDim.x*blockSizeDim.y*blockSizeDim.z; int numBatches = sharedMemSize/blockSize + ((sharedMemSize%blockSize)?1:0); for(int batch=0; batch<numBatches; batch++) { shared_index_1d = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x + blockSize*batch; //Magic is here [email protected] shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) % (blockDim.x+2*halo), (shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) / (blockDim.x+2*halo), (shared_index_1d / ((blockDim.y+2*halo)*(blockDim.x+2*halo))) ); global_index_3d = make_int3(blockIdx.x * blockDim.x + shared_index_3d.x - halo, blockIdx.y * blockDim.y + shared_index_3d.y - halo, blockIdx.z * blockDim.z + shared_index_3d.z - halo); global_index_1d = global_index_3d.z * dimy * dimx + global_index_3d.y * dimx + global_index_3d.x; if (shared_index_3d.z < (blockDim.z + 2*halo)) { if(global_index_3d.z >= 0 && global_index_3d.z < dimz && global_index_3d.y >= 0 && global_index_3d.y < dimy && global_index_3d.x >= 0 && global_index_3d.x < dimx) { sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = deviceSrc[global_index_1d]; } else { sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = -1; } } __syncthreads(); } // Stencil processing here float result = sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]; // Single pass writing here index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y, blockIdx.z * blockDim.z + threadIdx.z); index_1d = index_3d.z * dimy * dimx + index_3d.y * dimx + index_3d.x; if (index_3d.z < dimz && index_3d.y < dimy && index_3d.x < dimx) { deviceDst[index_1d] = result; } }
c4cfc25b46c836453d4d3657bd9d556b77ffdb29.cu
#include "stencil_3d.hpp" #include "helper_math.h" void stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo, cudaStream_t stream); __global__ void __stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo); void stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo, cudaStream_t stream) { dim3 blockDim(8, 8, 8); dim3 gridDim( (dimx/blockDim.x + ((dimx%blockDim.x)?1:0)), (dimy/blockDim.y + ((dimy%blockDim.y)?1:0)), (dimz/blockDim.z + ((dimz%blockDim.z)?1:0)) ); size_t sharedMemSize = (blockDim.x+2*halo)*(blockDim.y+2*halo)*(blockDim.z+2*halo)*sizeof(float); __stencil_3d<<<gridDim, blockDim, sharedMemSize, stream>>> (deviceSrc, deviceDst, dimx, dimy, dimz, halo); } #define at(x, y, z, dimx, dimy, dimz) ( clamp((int)z, 0, dimz-1)*dimy*dimx + \ clamp((int)y, 0, dimy-1)*dimx + \ clamp((int)x, 0, dimx-1) ) __global__ void __stencil_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int halo) { extern __shared__ float sharedMemSrc[]; int shared_index_1d, global_index_1d, index_1d; int3 shared_index_3d, global_index_3d, index_3d; // Multi batch reading here int3 sharedMemDim = make_int3(blockDim.x+2*halo, blockDim.y+2*halo, blockDim.z+2*halo); int sharedMemSize = sharedMemDim.x*sharedMemDim.y*sharedMemDim.z; int3 blockSizeDim = make_int3(blockDim.x+0*halo, blockDim.y+0*halo, blockDim.z+0*halo); int blockSize = blockSizeDim.x*blockSizeDim.y*blockSizeDim.z; int numBatches = sharedMemSize/blockSize + ((sharedMemSize%blockSize)?1:0); for(int batch=0; batch<numBatches; batch++) { shared_index_1d = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x + blockSize*batch; //Magic is here [email protected] shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) % (blockDim.x+2*halo), (shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) / (blockDim.x+2*halo), (shared_index_1d / ((blockDim.y+2*halo)*(blockDim.x+2*halo))) ); global_index_3d = make_int3(blockIdx.x * blockDim.x + shared_index_3d.x - halo, blockIdx.y * blockDim.y + shared_index_3d.y - halo, blockIdx.z * blockDim.z + shared_index_3d.z - halo); global_index_1d = global_index_3d.z * dimy * dimx + global_index_3d.y * dimx + global_index_3d.x; if (shared_index_3d.z < (blockDim.z + 2*halo)) { if(global_index_3d.z >= 0 && global_index_3d.z < dimz && global_index_3d.y >= 0 && global_index_3d.y < dimy && global_index_3d.x >= 0 && global_index_3d.x < dimx) { sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = deviceSrc[global_index_1d]; } else { sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = -1; } } __syncthreads(); } // Stencil processing here float result = sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]; // Single pass writing here index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y, blockIdx.z * blockDim.z + threadIdx.z); index_1d = index_3d.z * dimy * dimx + index_3d.y * dimx + index_3d.x; if (index_3d.z < dimz && index_3d.y < dimy && index_3d.x < dimx) { deviceDst[index_1d] = result; } }
376b4d5077b174f4e07544bc2ab14e1dc9ff4cf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GSRBCuda.h" #include "GSRBConstants.h" #define DEFAULT_THRESHOLD 4000 #define BLOCKSIZE 64 #define TILESIZE 16 #define cudaCheck(x) _cudaCheck(x, #x ,__FILE__, __LINE__) template<typename T> void _cudaCheck(T e, const char* func, const char* call, const int line){ if(e != hipSuccess){ printf("\"%s\" at %d in %s\n\treturned %d\n-> %s\n", func, line, call, (int)e, hipGetErrorString(e)); exit(EXIT_FAILURE); } } __global__ void GSRBKernel(double* phi, double* phi_new, double* rhs, double* alpha, double* beta_i, double* beta_j, double* beta_k, double* lambda, int color) { int i, j, k; i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= pencil-1 || i == 0) { return; } for (k=1; k<pencil-1; k++) { for(j=1; j<pencil-1; j++) { int ijk = i + j*pencil + k*plane; // if ((i+j+k+color) % 2 == 0) { double helmholtz = alpha[ijk]*phi[ijk] - H2INV*( beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] ) - beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] ) + beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] ) - beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] ) + beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] ) - beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] ) ); phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]); } } } } void GSRBCuda(double* phi, double* phi_new, double* rhs, double* alpha, double* beta_i, double* beta_j, double* beta_k, double* lambda) { //CUDA Buffers double* phi_device ; double* phi_new_device; double* rhs_device ; double* alpha_device ; double* beta_i_device ; double* beta_j_device ; double* beta_k_device ; double* lambda_device ; double* tmp; // Init Memory on GPU // Cuda Memory Management cudaCheck(hipMalloc((void**) &phi_device , grid * sizeof(double))); cudaCheck(hipMalloc((void**) &phi_new_device, grid * sizeof(double))); cudaCheck(hipMalloc((void**) &rhs_device , grid * sizeof(double))); cudaCheck(hipMalloc((void**) &alpha_device , grid * sizeof(double))); cudaCheck(hipMalloc((void**) &beta_i_device , grid * sizeof(double))); cudaCheck(hipMalloc((void**) &beta_j_device , grid * sizeof(double))); cudaCheck(hipMalloc((void**) &beta_k_device , grid * sizeof(double))); cudaCheck(hipMalloc((void**) &lambda_device , grid * sizeof(double))); cudaCheck(hipGetLastError()); cudaCheck(hipMemcpy(phi_device , phi , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(phi_new_device, phi_new, grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(rhs_device , rhs , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(alpha_device , alpha , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(beta_i_device , beta_i , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(beta_j_device , beta_j , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(beta_k_device , beta_k , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(lambda_device , lambda , grid * sizeof(double), hipMemcpyHostToDevice)); cudaCheck(hipGetLastError()); // Printing out some device data struct hipDeviceProp_t properties; hipGetDeviceProperties(&properties, 0); int maxGridSize = properties.maxGridSize[0]; int maxBlockSize = properties.maxThreadsDim[0]; int maxThreadCount = properties.maxThreadsPerBlock; size_t sharedMemoryPerBlock = properties.sharedMemPerBlock; printf("MaxGridDim1 %d, MaxBlockDim1 %d, MaxThreadPerBlock %d, SharedMemPerBlock %d\n", maxGridSize, maxBlockSize, maxThreadCount, sharedMemoryPerBlock); // Dimension // TODO, need to figure out how many 
long numOfThreads = pencil; // long numOfBlocks = ceil(pencil/numOfThreads); long numOfBlocks = ceil(grid/(numOfThreads*pencil*pencil)); // Unroll on i dim3 dimBlock(numOfThreads); dim3 dimGrid(numOfBlocks); printf("Config: #ofThreads %d, #ofBlocks %d\n", numOfThreads, numOfBlocks); hipEvent_t start, stop; float et; cudaCheck(hipEventCreate(&start)); cudaCheck(hipEventCreate(&stop)); cudaCheck(hipEventRecord(start)); printf("GSRBCuda Starting..\n"); auto t1 = std::chrono::high_resolution_clock::now(); for (int timestep = 0; timestep < 1; timestep++) { // Cuda Kernel Call hipLaunchKernelGGL(( GSRBKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, phi_device, phi_new_device, rhs_device, alpha_device, beta_i_device, beta_j_device , beta_k_device , lambda_device, 0); cudaCheck(hipGetLastError()); hipDeviceSynchronize(); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( GSRBKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, phi_device, phi_new_device, rhs_device, alpha_device, beta_i_device, beta_j_device , beta_k_device , lambda_device, 1); cudaCheck(hipGetLastError()); hipDeviceSynchronize(); cudaCheck(hipGetLastError()); // tmp = phi_new_device; // phi_new_device = phi_device; // phi_device = tmp; } auto t2 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> fp_ms = t2 - t1; std::cout << "CUDA Time is " << fp_ms.count() << " milliseconds\n"; // Time event end cudaCheck(hipEventRecord(stop)); cudaCheck(hipEventSynchronize(stop)); cudaCheck(hipEventElapsedTime(&et, start, stop)); cudaCheck(hipEventDestroy(start)); cudaCheck(hipEventDestroy(stop)); printf("Cuda Time is %f\n", et); // More Memory Management cudaCheck(hipMemcpy(phi , phi_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(phi_new, phi_new_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(rhs , rhs_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(alpha , alpha_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(beta_i , beta_i_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(beta_j , beta_j_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(beta_k , beta_k_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(lambda , lambda_device, grid * sizeof(double), hipMemcpyDeviceToHost)); cudaCheck(hipGetLastError()); hipFree(phi_device ); hipFree(phi_new_device); hipFree(rhs_device ); hipFree(alpha_device ); hipFree(beta_i_device ); hipFree(beta_j_device ); hipFree(beta_k_device ); hipFree(lambda_device ); }
376b4d5077b174f4e07544bc2ab14e1dc9ff4cf2.cu
#include "GSRBCuda.h" #include "GSRBConstants.h" #define DEFAULT_THRESHOLD 4000 #define BLOCKSIZE 64 #define TILESIZE 16 #define cudaCheck(x) _cudaCheck(x, #x ,__FILE__, __LINE__) template<typename T> void _cudaCheck(T e, const char* func, const char* call, const int line){ if(e != cudaSuccess){ printf("\"%s\" at %d in %s\n\treturned %d\n-> %s\n", func, line, call, (int)e, cudaGetErrorString(e)); exit(EXIT_FAILURE); } } __global__ void GSRBKernel(double* phi, double* phi_new, double* rhs, double* alpha, double* beta_i, double* beta_j, double* beta_k, double* lambda, int color) { int i, j, k; i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= pencil-1 || i == 0) { return; } for (k=1; k<pencil-1; k++) { for(j=1; j<pencil-1; j++) { int ijk = i + j*pencil + k*plane; // if ((i+j+k+color) % 2 == 0) { double helmholtz = alpha[ijk]*phi[ijk] - H2INV*( beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] ) - beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] ) + beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] ) - beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] ) + beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] ) - beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] ) ); phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]); } } } } void GSRBCuda(double* phi, double* phi_new, double* rhs, double* alpha, double* beta_i, double* beta_j, double* beta_k, double* lambda) { //CUDA Buffers double* phi_device ; double* phi_new_device; double* rhs_device ; double* alpha_device ; double* beta_i_device ; double* beta_j_device ; double* beta_k_device ; double* lambda_device ; double* tmp; // Init Memory on GPU // Cuda Memory Management cudaCheck(cudaMalloc((void**) &phi_device , grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &phi_new_device, grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &rhs_device , grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &alpha_device , grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &beta_i_device , grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &beta_j_device , grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &beta_k_device , grid * sizeof(double))); cudaCheck(cudaMalloc((void**) &lambda_device , grid * sizeof(double))); cudaCheck(cudaGetLastError()); cudaCheck(cudaMemcpy(phi_device , phi , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(phi_new_device, phi_new, grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(rhs_device , rhs , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(alpha_device , alpha , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(beta_i_device , beta_i , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(beta_j_device , beta_j , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(beta_k_device , beta_k , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(lambda_device , lambda , grid * sizeof(double), cudaMemcpyHostToDevice)); cudaCheck(cudaGetLastError()); // Printing out some device data struct cudaDeviceProp properties; cudaGetDeviceProperties(&properties, 0); int maxGridSize = properties.maxGridSize[0]; int maxBlockSize = properties.maxThreadsDim[0]; int maxThreadCount = properties.maxThreadsPerBlock; size_t sharedMemoryPerBlock = properties.sharedMemPerBlock; printf("MaxGridDim1 %d, MaxBlockDim1 %d, MaxThreadPerBlock %d, SharedMemPerBlock %d\n", maxGridSize, maxBlockSize, maxThreadCount, sharedMemoryPerBlock); // Dimension // TODO, need to figure out how many long numOfThreads = pencil; // long numOfBlocks = 
ceil(pencil/numOfThreads); long numOfBlocks = ceil(grid/(numOfThreads*pencil*pencil)); // Unroll on i dim3 dimBlock(numOfThreads); dim3 dimGrid(numOfBlocks); printf("Config: #ofThreads %d, #ofBlocks %d\n", numOfThreads, numOfBlocks); cudaEvent_t start, stop; float et; cudaCheck(cudaEventCreate(&start)); cudaCheck(cudaEventCreate(&stop)); cudaCheck(cudaEventRecord(start)); printf("GSRBCuda Starting..\n"); auto t1 = std::chrono::high_resolution_clock::now(); for (int timestep = 0; timestep < 1; timestep++) { // Cuda Kernel Call GSRBKernel<<<dimGrid, dimBlock>>> (phi_device, phi_new_device, rhs_device, alpha_device, beta_i_device, beta_j_device , beta_k_device , lambda_device, 0); cudaCheck(cudaGetLastError()); cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); GSRBKernel<<<dimGrid, dimBlock>>> (phi_device, phi_new_device, rhs_device, alpha_device, beta_i_device, beta_j_device , beta_k_device , lambda_device, 1); cudaCheck(cudaGetLastError()); cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); // tmp = phi_new_device; // phi_new_device = phi_device; // phi_device = tmp; } auto t2 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> fp_ms = t2 - t1; std::cout << "CUDA Time is " << fp_ms.count() << " milliseconds\n"; // Time event end cudaCheck(cudaEventRecord(stop)); cudaCheck(cudaEventSynchronize(stop)); cudaCheck(cudaEventElapsedTime(&et, start, stop)); cudaCheck(cudaEventDestroy(start)); cudaCheck(cudaEventDestroy(stop)); printf("Cuda Time is %f\n", et); // More Memory Management cudaCheck(cudaMemcpy(phi , phi_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(phi_new, phi_new_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(rhs , rhs_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(alpha , alpha_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(beta_i , beta_i_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(beta_j , beta_j_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(beta_k , beta_k_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(lambda , lambda_device, grid * sizeof(double), cudaMemcpyDeviceToHost)); cudaCheck(cudaGetLastError()); cudaFree(phi_device ); cudaFree(phi_new_device); cudaFree(rhs_device ); cudaFree(alpha_device ); cudaFree(beta_i_device ); cudaFree(beta_j_device ); cudaFree(beta_k_device ); cudaFree(lambda_device ); }
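The GSRBKernel above accepts a color argument, but the parity test that would use it is commented out, so both launches update every interior point. For reference, a CPU sketch of one red-black Gauss-Seidel sweep with the parity test enabled is given below. It is an illustration only, not part of the original code, and it assumes pencil, plane and H2INV are the compile-time constants from GSRBConstants.h.

// Illustrative CPU sketch of one red-black sweep; updates phi in place.
void gsrb_sweep_cpu(double* phi, const double* rhs, const double* alpha,
                    const double* beta_i, const double* beta_j, const double* beta_k,
                    const double* lambda, int color)
{
    for (int k = 1; k < pencil - 1; ++k)
      for (int j = 1; j < pencil - 1; ++j)
        for (int i = 1; i < pencil - 1; ++i) {
            if ((i + j + k + color) % 2 != 0) continue;   // touch only one color per sweep
            const int ijk = i + j * pencil + k * plane;
            const double helmholtz = alpha[ijk] * phi[ijk]
                - H2INV * ( beta_i[ijk + 1]      * (phi[ijk + 1]      - phi[ijk])
                          - beta_i[ijk]          * (phi[ijk]          - phi[ijk - 1])
                          + beta_j[ijk + pencil] * (phi[ijk + pencil] - phi[ijk])
                          - beta_j[ijk]          * (phi[ijk]          - phi[ijk - pencil])
                          + beta_k[ijk + plane]  * (phi[ijk + plane]  - phi[ijk])
                          - beta_k[ijk]          * (phi[ijk]          - phi[ijk - plane]) );
            phi[ijk] -= lambda[ijk] * (helmholtz - rhs[ijk]);  // in-place Gauss-Seidel update
        }
}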
4d6bc634ba8c674708788f6f47623956be2e63cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/operators.h" #include "../include/split_op.h" #include "../include/kernels.h" #include "../include/dynamic.h" void laplacian(Grid &par, double2 *data, double2* out, int xDim, int yDim, int zDim, double dx, double dy, double dz){ dim3 grid = par.grid; dim3 threads = par.threads; int gsize = xDim * yDim * zDim; double2 *temp_derivative; cudaHandleError( hipMalloc((void **) &temp_derivative, sizeof(double2)*gsize) ); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, data, temp_derivative, 1, gsize, dx); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, temp_derivative, temp_derivative, 1, gsize, dx); cudaCheckError(); hipLaunchKernelGGL(( copy), dim3(grid), dim3(threads), 0, 0, temp_derivative, out); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, data, temp_derivative, xDim, gsize, dy); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, temp_derivative, temp_derivative, xDim, gsize, dy); cudaCheckError(); hipLaunchKernelGGL(( sum), dim3(grid), dim3(threads), 0, 0, temp_derivative, out, out); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, data, temp_derivative, xDim*yDim, gsize, dz); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, temp_derivative, temp_derivative, xDim*yDim, gsize, dz); cudaCheckError(); hipLaunchKernelGGL(( sum), dim3(grid), dim3(threads), 0, 0, temp_derivative, out, out); cudaCheckError(); cudaHandleError( hipFree(temp_derivative) ); } void laplacian(Grid &par, double2 *data, double2* out, int xDim, int yDim, double dx, double dy){ dim3 grid = par.grid; dim3 threads = par.threads; int gsize = xDim * yDim; double2 *temp_derivative; cudaHandleError( hipMalloc((void **) &temp_derivative, sizeof(double2)*gsize) ); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, data, temp_derivative, 1, gsize, dx); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, temp_derivative, temp_derivative, 1, gsize, dx); cudaCheckError(); hipLaunchKernelGGL(( copy), dim3(grid), dim3(threads), 0, 0, temp_derivative, out); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, data, temp_derivative, xDim, gsize, dy); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, temp_derivative, temp_derivative, xDim, gsize, dy); cudaCheckError(); hipLaunchKernelGGL(( sum), dim3(grid), dim3(threads), 0, 0, temp_derivative, out, out); cudaCheckError(); cudaHandleError( hipFree(temp_derivative) ); } void laplacian(Grid &par, double2 *data, double2* out, int xDim, double dx){ dim3 grid = par.grid; dim3 threads = par.threads; int gsize = xDim; hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, data, out, 1, gsize, dx); cudaCheckError(); hipLaunchKernelGGL(( derive), dim3(grid), dim3(threads), 0, 0, out, out, 1, gsize, dx); cudaCheckError(); } double sign(double x){ if (x < 0){ return -1.0; } else if (x == 0){ return 0.0; } else{ return 1.0; } } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. 
double *curl2d(Grid &par, double *Ax, double *Ay){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int size = sizeof(double) * xDim * yDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < yDim-1; i++){ for (int j = 0; j < xDim-1; j++){ index = j + xDim * i; curl[index] = (Ay[index] - Ay[index+1]) - (Ax[index] - Ax[index+yDim]); } } return curl; } double *curl3d_r(Grid &par, double *Bx, double *By){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)malloc(size); for (int i = 0; i < xDim*yDim*zDim; ++i){ curl[i] = sqrt(Bx[i]*Bx[i] + By[i] * By[i]); } return curl; } double *curl3d_phi(Grid &par, double *Bx, double *By){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)malloc(size); for (int i = 0; i < xDim*yDim*zDim; ++i){ curl[i] = atan2(By[i], Bx[i])+M_PI; } return curl; } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. // Not complete yet! double *curl3d_x(Grid &par, double *Ax, double *Ay, double *Az){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < zDim-1; i++){ for (int j = 0; j < yDim-1; j++){ for (int k = 0; k < xDim-1; k++){ index = k + xDim * j + xDim * yDim * i; curl[index] = (Az[index] - Az[index + zDim]) -(Ay[index] - Ay[index + zDim*yDim]); } } } return curl; } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. // Not complete yet! double *curl3d_y(Grid &par, double *Ax, double *Ay, double *Az){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < zDim-1; i++){ for (int j = 0; j < yDim-1; j++){ for (int k = 0; k < xDim - 1; k++){ index = k + xDim * j + xDim * yDim * i; curl[index] = -(Az[index] - Az[index + 1]) -(Ax[index] - Ax[index + xDim*yDim]); } } } return curl; } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. // Not complete yet! 
double *curl3d_z(Grid &par, double *Ax, double *Ay, double *Az){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < zDim-1; i++){ for (int j = 0; j < yDim-1; j++){ for (int k = 0; k < xDim-1; k++){ index = k + xDim * j + xDim * yDim * i; curl[index] = (Ay[index] - Ay[index + 1]) -(Ax[index] - Ax[index + xDim]); } } } return curl; } // Function to check whether a file exists std::string filecheck(std::string filename){ struct stat buffer = {0}; if (stat(filename.c_str(), &buffer) == -1){ std::cout << "File " << filename << " does not exist!" << '\n'; std::cout << "Please select a new file:" << '\n'; std::cin >> filename; filename = filecheck(filename); } return filename; } // Function to read Ax from file. // Note that this comes with a special method in init... void file_A(std::string filename, double *A, double omega){ std::fstream infile(filename, std::ios_base::in); double inval; int count = 0; while (infile >> inval){ A[count] = omega*inval; count++; } } /*----------------------------------------------------------------------------// * GPU KERNELS *-----------------------------------------------------------------------------*/ // Function to generate momentum grids void generate_p_space(Grid &par){ int dimnum = par.ival("dimnum"); int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); double xMax = par.dval("xMax"); double yMax = 0; if (dimnum > 1){ yMax = par.dval("yMax"); } double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } double pxMax = par.dval("pxMax"); double pyMax = 0; if (dimnum > 1){ pyMax = par.dval("pyMax"); } double pzMax = 0; if (dimnum == 3){ pzMax = par.dval("pzMax"); } double dx = par.dval("dx"); double dy = 0; if (dimnum > 1){ dy = par.dval("dy"); } double dz = 0; if (dimnum == 3){ dz = par.dval("dz"); } double dpx = par.dval("dpx"); double dpy = 0; if (dimnum > 1){ dpy = par.dval("dpy"); } double dpz = 0; if (dimnum == 3){ dpz = par.dval("dpz"); } double *x, *y, *z, *px, *py, *pz, *x_gpu, *y_gpu, *z_gpu, *px_gpu, *py_gpu, *pz_gpu; x = (double *) malloc(sizeof(double) * xDim); y = (double *) malloc(sizeof(double) * yDim); z = (double *) malloc(sizeof(double) * zDim); px = (double *) malloc(sizeof(double) * xDim); py = (double *) malloc(sizeof(double) * yDim); pz = (double *) malloc(sizeof(double) * zDim); if (dimnum == 2){ for(int i=0; i<xDim/2; ++i){ x[i] = -xMax + i*dx; x[i + (xDim/2)] = i*dx; px[i] = i*dpx; px[i + (xDim/2)] = -pxMax + i*dpx; } for(int i=0; i<yDim/2; ++i){ y[i] = -yMax + i*dy; y[i + (yDim/2)] = i*dy; py[i] = i*dpy; py[i + (yDim/2)] = -pyMax + i*dpy; } for(int i = 0; i < zDim; ++i){ z[i] = 0; pz[i] = 0; } } else if(dimnum == 3){ for(int i=0; i<xDim/2; ++i){ x[i] = -xMax + i*dx; x[i + (xDim/2)] = i*dx; px[i] = i*dpx; px[i + (xDim/2)] = -pxMax + i*dpx; } for(int i=0; i<yDim/2; ++i){ y[i] = -yMax + i*dy; y[i + (yDim/2)] = i*dy; py[i] = i*dpy; py[i + (yDim/2)] = -pyMax + i*dpy; } for(int i=0; i<zDim/2; ++i){ z[i] = -zMax + i*dz; z[i + (zDim/2)] = i*dz; pz[i] = i*dpz; pz[i + (zDim/2)] = -pzMax + i*dpz; } } else if (dimnum == 1){ for(int i=0; i<xDim/2; ++i){ x[i] = -xMax + i*dx; x[i + (xDim/2)] = i*dx; px[i] = i*dpx; px[i + (xDim/2)] = -pxMax + i*dpx; } for(int i = 0; i < zDim; ++i){ z[i] = 0; pz[i] = 0; y[i] = 
0; py[i] = 0; } } par.store("x",x); par.store("y",y); par.store("z",z); par.store("px",px); par.store("py",py); par.store("pz",pz); // Now move these items to the gpu cudaHandleError( hipMalloc((void**) &x_gpu, sizeof(double) * xDim) ); cudaHandleError( hipMalloc((void**) &y_gpu, sizeof(double) * yDim) ); cudaHandleError( hipMalloc((void**) &z_gpu, sizeof(double) * zDim) ); cudaHandleError( hipMalloc((void**) &px_gpu, sizeof(double) * xDim) ); cudaHandleError( hipMalloc((void**) &py_gpu, sizeof(double) * yDim) ); cudaHandleError( hipMalloc((void**) &pz_gpu, sizeof(double) * zDim) ); cudaHandleError( hipMemcpy(x_gpu, x, sizeof(double)*xDim, hipMemcpyHostToDevice) ); cudaHandleError( hipMemcpy(y_gpu, y, sizeof(double)*yDim, hipMemcpyHostToDevice) ); cudaHandleError( hipMemcpy(z_gpu, z, sizeof(double)*zDim, hipMemcpyHostToDevice) ); cudaHandleError( hipMemcpy(px_gpu, px, sizeof(double)*xDim, hipMemcpyHostToDevice) ); cudaHandleError( hipMemcpy(py_gpu, py, sizeof(double)*yDim, hipMemcpyHostToDevice) ); cudaHandleError( hipMemcpy(pz_gpu, pz, sizeof(double)*zDim, hipMemcpyHostToDevice) ); par.store("x_gpu",x_gpu); par.store("y_gpu",y_gpu); par.store("z_gpu",z_gpu); par.store("px_gpu",px_gpu); par.store("py_gpu",py_gpu); par.store("pz_gpu",pz_gpu); } // This function is basically a wrapper to call the appropriate K kernel void generate_K(Grid &par){ // For k, we need xp, yp, and zp. These will also be used in generating // pAxyz parameters, so it should already be stored in par. double *px_gpu = par.dsval("px_gpu"); double *py_gpu = par.dsval("py_gpu"); double *pz_gpu = par.dsval("pz_gpu"); double gSize = par.ival("gSize"); double mass = par.dval("mass"); // Creating K to work with double *K, *K_gpu; K = (double*)malloc(sizeof(double)*gSize); cudaHandleError( hipMalloc((void**) &K_gpu, sizeof(double)*gSize) ); hipLaunchKernelGGL(( simple_K), dim3(par.grid), dim3(par.threads), 0, 0, px_gpu, py_gpu, pz_gpu, mass, K_gpu); cudaCheckError(); cudaHandleError( hipMemcpy(K, K_gpu, sizeof(double)*gSize, hipMemcpyDeviceToHost) ); par.store("K",K); par.store("K_gpu",K_gpu); } // Simple kernel for generating K __global__ void simple_K(double *xp, double *yp, double *zp, double mass, double *K){ unsigned int gid = getGid3d3d(); unsigned int xid = blockDim.x*blockIdx.x + threadIdx.x; unsigned int yid = blockDim.y*blockIdx.y + threadIdx.y; unsigned int zid = blockDim.z*blockIdx.z + threadIdx.z; K[gid] = (HBAR*HBAR/(2*mass))*(xp[xid]*xp[xid] + yp[yid]*yp[yid] + zp[zid]*zp[zid]); } // Function to generate game fields void generate_gauge(Grid &par){ int gSize = par.ival("gSize"); int dimnum = par.ival("dimnum"); double *Ax, *Ay, *Az, *Ax_gpu, *Ay_gpu, *Az_gpu; double *x_gpu = par.dsval("x_gpu"); double *y_gpu = par.dsval("y_gpu"); double *z_gpu = par.dsval("z_gpu"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 1; if (dimnum == 3){ zMax = par.dval("zMax"); } double omegaX = par.dval("omegaX"); double omegaY = par.dval("omegaY"); double omegaZ; if (dimnum == 3){ omegaZ = par.dval("omegaZ"); } double omega = par.dval("omega"); double fudge = par.dval("fudge"); Ax = (double *)malloc(sizeof(double)*gSize); Ay = (double *)malloc(sizeof(double)*gSize); Az = (double *)malloc(sizeof(double)*gSize); cudaHandleError( hipMalloc((void**) &Ax_gpu, sizeof(double)*gSize) ); cudaHandleError( hipMalloc((void**) &Ay_gpu, sizeof(double)*gSize) ); cudaHandleError( hipMalloc((void**) &Az_gpu, sizeof(double)*gSize) ); if (par.Afn == "file"){ file_A(par.Axfile, Ax, omega); cudaHandleError( 
hipMemcpy(Ax_gpu, Ax, sizeof(double)*gSize, hipMemcpyHostToDevice) ); if (dimnum > 1){ file_A(par.Ayfile, Ay, omega); cudaHandleError( hipMemcpy(Ay_gpu,Ay,sizeof(double)*gSize,hipMemcpyHostToDevice) ); } if (dimnum == 3){ file_A(par.Azfile, Az, omega); cudaHandleError( hipMemcpy(Az_gpu,Az,sizeof(double)*gSize,hipMemcpyHostToDevice) ); } std::cout << "finished reading Ax / Ay / Az from file" << '\n'; } else{ if (par.is_ast_gpu("Ax")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("Ax"); hipLaunchKernelGGL(( find_field), dim3(par.grid), dim3(par.threads), 0, 0, Ax_gpu, dx, dy, dz, xMax, yMax, zMax, 0, eqn); cudaCheckError(); } else{ parhipLaunchKernelGGL((.Ax_fn), dim3(par.grid), dim3(par.threads), 0, 0, x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Ax_gpu); cudaCheckError(); } if (par.is_ast_gpu("Ay")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("Ay"); hipLaunchKernelGGL(( find_field), dim3(par.grid), dim3(par.threads), 0, 0, Ay_gpu, dx, dy, dz, xMax, yMax, zMax , 0, eqn); cudaCheckError(); } else{ parhipLaunchKernelGGL((.Ay_fn), dim3(par.grid), dim3(par.threads), 0, 0, x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Ay_gpu); cudaCheckError(); } if (dimnum == 3){ if (par.is_ast_gpu("Az")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("Az"); hipLaunchKernelGGL(( find_field), dim3(par.grid), dim3(par.threads), 0, 0, Az_gpu, dx, dy, dz, xMax, yMax, zMax, 0, eqn); cudaCheckError(); } else{ parhipLaunchKernelGGL((.Az_fn), dim3(par.grid), dim3(par.threads), 0, 0, x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Az_gpu); cudaCheckError(); } } else{ hipLaunchKernelGGL(( kconstant_A), dim3(par.grid), dim3(par.threads), 0, 0, x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Az_gpu); cudaCheckError(); } } cudaHandleError( hipMemcpy(Ax, Ax_gpu, sizeof(double)*gSize,hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(Ay, Ay_gpu, sizeof(double)*gSize,hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(Az, Az_gpu, sizeof(double)*gSize,hipMemcpyDeviceToHost) ); par.store("Ax", Ax); par.store("Ay", Ay); par.store("Az", Az); par.store("Ax_gpu", Ax_gpu); par.store("Ay_gpu", Ay_gpu); par.store("Az_gpu", Az_gpu); } // constant Kernel A __global__ void kconstant_A(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); A[gid] = 0; } // Kernel for simple rotational case, Ax __global__ void krotation_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int yid = blockDim.y*blockIdx.y + threadIdx.y; A[gid] = -y[yid] * omega * omegaX; } // Kernel for simple rotational case, Ay __global__ void krotation_Ay(double *x, double *y, double *z, double xMax, double yMax, double 
zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; A[gid] = x[xid] * omega * omegaY; } // Kernel for simple rotational case, Ax __global__ void kring_rotation_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double theta = atan2(y[yid],x[xid]); A[gid] = (z[zid]+zMax)*cos(theta)*omega*omegaX; } // Kernel for simple rotational case, Ay __global__ void kring_rotation_Ay(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double theta = atan2(y[yid],x[xid]); A[gid] = (z[zid]+zMax)*sin(theta)*omega*omegaX; } // Kernel for simple rotational case, Az __global__ void kring_rotation_Az(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; double r = sqrt(x[xid]*x[xid] + y[yid]*y[yid]); A[gid] = r*omega*omegaX; } // kernel for a simple vortex ring __global__ void kring_Az(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; double rad = sqrt(x[xid]*x[xid] + y[yid]*y[yid]); A[gid] = omega * exp(-rad*rad / (0.0001*xMax)) * 0.01; } // testing kernel Ax __global__ void ktest_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int yid = blockDim.y*blockIdx.y + threadIdx.y; A[gid] = (sin(y[yid] * 100000)+1) * yMax * omega; } // testing kernel Ay __global__ void ktest_Ay(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); A[gid] = 0; } // function to generate V void generate_fields(Grid &par){ generate_p_space(par); generate_K(par); generate_gauge(par); int gSize = par.ival("gSize"); int dimnum = par.ival("dimnum"); int winding = par.dval("winding"); bool energy_calc = par.bval("energy_calc"); double dt = par.dval("dt"); double gdt = par.dval("gdt"); double *x_gpu = par.dsval("x_gpu"); double *y_gpu = par.dsval("y_gpu"); double *z_gpu = par.dsval("z_gpu"); double *px_gpu = par.dsval("px_gpu"); double *py_gpu = par.dsval("py_gpu"); double *pz_gpu = par.dsval("pz_gpu"); double *Ax_gpu = par.dsval("Ax_gpu"); double *Ay_gpu = par.dsval("Ay_gpu"); double *Az_gpu = par.dsval("Az_gpu"); double *K_gpu = par.dsval("K_gpu"); // Creating items list for kernels double *items, *items_gpu; int item_size = 18; items = (double*)malloc(sizeof(double)*item_size); cudaHandleError( hipMalloc((void**) &items_gpu, sizeof(double)*item_size) ); for (int i = 0; i < item_size; ++i){ items[i] = 0; 
} items[0] = par.dval("xMax"); items[1] = par.dval("yMax"); if (dimnum == 3){ items[2] = par.dval("zMax"); } items[3] = par.dval("omegaX"); items[4] = par.dval("omegaY"); if (dimnum == 3){ items[5] = par.dval("omegaZ"); } items[6] = par.dval("x0_shift"); items[7] = par.dval("y0_shift"); if (dimnum == 3){ items[8] = par.dval("z0_shift"); } else{ items[8] = 0.0; } items[9] = par.dval("mass"); items[10] = par.dval("gammaY"); items[11] = 1.0; // For gammaZ items[12] = par.dval("fudge"); items[13] = 0.0; // For time items[14] = par.dval("Rxy"); items[15] = par.dval("a0x"); items[16] = par.dval("a0y"); if (dimnum == 3){ items[17] = par.dval("a0z"); } else{ items[17] = 1.0; } cudaHandleError( hipMemcpy(items_gpu, items, sizeof(double)*item_size, hipMemcpyHostToDevice) ); double fudge = par.dval("fudge"); // Generating V double *V, *V_gpu; V = (double *)malloc(sizeof(double)*gSize); hipMalloc((void **) &V_gpu, sizeof(double)*gSize); if (par.is_ast_gpu("V")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("V"); hipLaunchKernelGGL(( find_field), dim3(par.grid), dim3(par.threads), 0, 0, V_gpu, dx, dy, dz, xMax, yMax, zMax, 0, eqn); cudaCheckError(); } else{ parhipLaunchKernelGGL((.V_fn), dim3(par.grid), dim3(par.threads), 0, 0, x_gpu, y_gpu, z_gpu, items_gpu, Ax_gpu, Ay_gpu, Az_gpu, V_gpu); cudaCheckError(); } cudaHandleError( hipMemcpy(V, V_gpu, sizeof(double)*gSize, hipMemcpyDeviceToHost) ); // Generating wfc double2 *wfc, *wfc_gpu; double *phi, *phi_gpu; wfc = (double2 *)malloc(sizeof(double2)*gSize); phi = (double *)malloc(sizeof(double)*gSize); cudaHandleError( hipMalloc((void**) &wfc_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &phi_gpu, sizeof(double)*gSize) ); if (par.bval("read_wfc")){ wfc = par.cufftDoubleComplexval("wfc"); cudaHandleError( hipMemcpy(wfc_gpu, wfc, sizeof(double2)*gSize, hipMemcpyHostToDevice) ); } else{ parhipLaunchKernelGGL((.wfc_fn), dim3(par.grid), dim3(par.threads), 0, 0, x_gpu, y_gpu, z_gpu, items_gpu, winding, phi_gpu, wfc_gpu); cudaCheckError(); cudaHandleError( hipMemcpy(wfc, wfc_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); } cudaHandleError( hipMemcpy(phi, phi_gpu, sizeof(double)*gSize, hipMemcpyDeviceToHost) ); // generating aux fields. 
double2 *GV, *EV, *GK, *EK; double2 *GV_gpu, *EV_gpu, *GK_gpu, *EK_gpu; double2 *GpAx, *GpAy, *GpAz, *EpAx, *EpAy, *EpAz; double2 *GpAx_gpu, *GpAy_gpu, *GpAz_gpu, *EpAx_gpu, *EpAy_gpu, *EpAz_gpu; double *pAx, *pAy, *pAz; double *pAx_gpu, *pAy_gpu, *pAz_gpu; GV = (double2 *)malloc(sizeof(double2)*gSize); EV = (double2 *)malloc(sizeof(double2)*gSize); GK = (double2 *)malloc(sizeof(double2)*gSize); EK = (double2 *)malloc(sizeof(double2)*gSize); GpAx = (double2 *)malloc(sizeof(double2)*gSize); EpAx = (double2 *)malloc(sizeof(double2)*gSize); GpAy = (double2 *)malloc(sizeof(double2)*gSize); EpAy = (double2 *)malloc(sizeof(double2)*gSize); GpAz = (double2 *)malloc(sizeof(double2)*gSize); EpAz = (double2 *)malloc(sizeof(double2)*gSize); pAx = (double *)malloc(sizeof(double)*gSize); pAy = (double *)malloc(sizeof(double)*gSize); pAz = (double *)malloc(sizeof(double)*gSize); cudaHandleError( hipMalloc((void**) &GV_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &EV_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &GK_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &EK_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &GpAx_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &EpAx_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &GpAy_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &EpAy_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &GpAz_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &EpAz_gpu, sizeof(double2)*gSize) ); cudaHandleError( hipMalloc((void**) &pAx_gpu, sizeof(double)*gSize) ); cudaHandleError( hipMalloc((void**) &pAy_gpu, sizeof(double)*gSize) ); cudaHandleError( hipMalloc((void**) &pAz_gpu, sizeof(double)*gSize) ); hipLaunchKernelGGL(( aux_fields), dim3(par.grid), dim3(par.threads), 0, 0, V_gpu, K_gpu, gdt, dt, Ax_gpu, Ay_gpu, Az_gpu, px_gpu, py_gpu, pz_gpu, pAx_gpu, pAy_gpu, pAz_gpu, GV_gpu, EV_gpu, GK_gpu, EK_gpu, GpAx_gpu, GpAy_gpu, GpAz_gpu, EpAx_gpu, EpAy_gpu, EpAz_gpu); cudaCheckError(); cudaHandleError( hipMemcpy(GV, GV_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(EV, EV_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(GK, GK_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(EK, EK_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(GpAx, GpAx_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(EpAx, EpAx_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(GpAy, GpAy_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(EpAy, EpAy_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(GpAz, GpAz_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(EpAz, EpAz_gpu, sizeof(double2)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(pAx, pAx_gpu, sizeof(double)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(pAy, pAy_gpu, sizeof(double)*gSize, hipMemcpyDeviceToHost) ); cudaHandleError( hipMemcpy(pAz, pAz_gpu, sizeof(double)*gSize, hipMemcpyDeviceToHost) ); // Storing variables cudaHandleError( hipFree(items_gpu) ); //hipFree(phi_gpu); cudaHandleError( hipFree(GV_gpu) ); cudaHandleError( hipFree(EV_gpu) ); cudaHandleError( hipFree(GK_gpu) ); cudaHandleError( hipFree(EK_gpu) ); cudaHandleError( hipFree(pAx_gpu) ); cudaHandleError( 
hipFree(pAy_gpu) ); cudaHandleError( hipFree(pAz_gpu) ); cudaHandleError( hipFree(GpAx_gpu) ); cudaHandleError( hipFree(GpAy_gpu) ); cudaHandleError( hipFree(GpAz_gpu) ); cudaHandleError( hipFree(EpAx_gpu) ); cudaHandleError( hipFree(EpAy_gpu) ); cudaHandleError( hipFree(EpAz_gpu) ); cudaHandleError( hipFree(x_gpu) ); cudaHandleError( hipFree(y_gpu) ); cudaHandleError( hipFree(z_gpu) ); cudaHandleError( hipFree(px_gpu) ); cudaHandleError( hipFree(py_gpu) ); cudaHandleError( hipFree(pz_gpu) ); if (!energy_calc){ cudaHandleError( hipFree(K_gpu) ); cudaHandleError( hipFree(V_gpu) ); cudaHandleError( hipFree(Ax_gpu) ); cudaHandleError( hipFree(Ay_gpu) ); cudaHandleError( hipFree(Az_gpu) ); } else{ par.store("V_gpu",V_gpu); } par.store("V",V); par.store("items", items); //par.store("items_gpu", items_gpu); par.store("wfc", wfc); par.store("wfc_gpu", wfc_gpu); par.store("Phi", phi); par.store("Phi_gpu", phi_gpu); par.store("GV",GV); par.store("EV",EV); par.store("GK",GK); par.store("EK",EK); //par.store("GV_gpu",GV_gpu); //par.store("EV_gpu",EV_gpu); //par.store("GK_gpu",GK_gpu); //par.store("EK_gpu",EK_gpu); par.store("GpAx",GpAx); par.store("EpAx",EpAx); par.store("GpAy",GpAy); par.store("EpAy",EpAy); par.store("GpAz",GpAz); par.store("EpAz",EpAz); par.store("pAx",pAx); par.store("pAy",pAy); par.store("pAz",pAz); //par.store("pAx_gpu",pAx_gpu); //par.store("pAy_gpu",pAy_gpu); //par.store("pAz_gpu",pAz_gpu); } __global__ void kharmonic_V(double *x, double *y, double *z, double* items, double *Ax, double *Ay, double *Az, double *V){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double V_x = items[3]*(x[xid]+items[6]); double V_y = items[10]*items[4]*(y[yid]+items[7]); double V_z = items[11]*items[5]*(z[zid]+items[8]); V[gid] = 0.5*items[9]*((V_x*V_x + V_y*V_y + V_z*V_z) + (Ax[gid]*Ax[gid] + Ay[gid]*Ay[gid] + Az[gid]*Az[gid])); } // kernel for simple 3d torus trapping potential __global__ void ktorus_V(double *x, double *y, double *z, double* items, double *Ax, double *Ay, double *Az, double *V){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6]) + (y[yid] - items[7]) * (y[yid] - items[7])) - 0.5*items[0]; double omegaR = (items[3]*items[3] + items[4]*items[4]); double V_tot = (2*items[5]*items[5]*(z[zid] - items[8])*(z[zid] - items[8]) + omegaR*(rad*rad + items[12]*rad*z[zid])); V[gid] = 0.5*items[9]*(V_tot + Ax[gid]*Ax[gid] + Ay[gid]*Ay[gid] + Az[gid]*Az[gid]); } __global__ void kstd_wfc(double *x, double *y, double *z, double *items, double winding, double *phi, double2 *wfc){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; phi[gid] = -fmod(winding*atan2(y[yid], x[xid]),2*PI); wfc[gid].x = exp(-(x[xid]*x[xid]/(items[14]*items[14]*items[15]*items[15]) + y[yid]*y[yid]/(items[14]*items[14]*items[16]*items[16]) + z[zid]*z[zid]/(items[14]*items[14]*items[17]*items[17]))) * cos(phi[gid]); wfc[gid].y = -exp(-(x[xid]*x[xid]/(items[14]*items[14]*items[15]*items[15]) + y[yid]*y[yid]/(items[14]*items[14]*items[16]*items[16]) + z[zid]*z[zid]/(items[14]*items[14]*items[17]*items[17]))) * sin(phi[gid]); } __global__ void ktorus_wfc(double *x, double *y, double *z, double *items, double winding, double 
*phi, double2 *wfc){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6]) + (y[yid] - items[7]) * (y[yid] - items[7])) - 0.5*items[0]; wfc[gid].x = exp(-( pow((rad)/(items[14]*items[15]*0.5),2) + pow((z[zid])/(items[14]*items[17]*0.5),2) ) ); wfc[gid].y = 0.0; } __global__ void aux_fields(double *V, double *K, double gdt, double dt, double* Ax, double *Ay, double* Az, double *px, double *py, double *pz, double* pAx, double* pAy, double* pAz, double2* GV, double2* EV, double2* GK, double2* EK, double2* GpAx, double2* GpAy, double2* GpAz, double2* EpAx, double2* EpAy, double2* EpAz){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; GV[gid].x = exp(-V[gid]*(gdt/(2*HBAR))); GK[gid].x = exp(-K[gid]*(gdt/HBAR)); GV[gid].y = 0.0; GK[gid].y = 0.0; // Ax and Ay will be calculated here but are used only for // debugging. They may be needed later for magnetic field calc pAx[gid] = Ax[gid] * px[xid]; pAy[gid] = Ay[gid] * py[yid]; pAz[gid] = Az[gid] * pz[zid]; GpAx[gid].x = exp(-pAx[gid]*gdt); GpAx[gid].y = 0; GpAy[gid].x = exp(-pAy[gid]*gdt); GpAy[gid].y = 0; GpAz[gid].x = exp(-pAz[gid]*gdt); GpAz[gid].y = 0; EV[gid].x=cos(-V[gid]*(dt/(2*HBAR))); EV[gid].y=sin(-V[gid]*(dt/(2*HBAR))); EK[gid].x=cos(-K[gid]*(dt/HBAR)); EK[gid].y=sin(-K[gid]*(dt/HBAR)); EpAz[gid].x=cos(-pAz[gid]*dt); EpAz[gid].y=sin(-pAz[gid]*dt); EpAy[gid].x=cos(-pAy[gid]*dt); EpAy[gid].y=sin(-pAy[gid]*dt); EpAx[gid].x=cos(-pAx[gid]*dt); EpAx[gid].y=sin(-pAx[gid]*dt); } // Function to generate grids and treads for 2d and 3d cases void generate_grid(Grid& par){ int dimnum = par.ival("dimnum"); int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int xD = 1, yD = 1, zD = 1; int max_threads = 256; if (xDim < max_threads){ max_threads = xDim; } if (dimnum == 2){ if (xDim <= max_threads){ par.threads.x = xDim; par.threads.y = 1; par.threads.z = 1; xD = 1; yD = yDim; zD = 1; } else{ int count = 0; int dim_tmp = xDim; while (dim_tmp > max_threads){ count++; dim_tmp /= 2; } std::cout << "count is: " << count << '\n'; par.threads.x = dim_tmp; par.threads.y = 1; par.threads.z = 1; xD = pow(2,count); yD = yDim; zD = 1; } } else if (dimnum == 3){ if (xDim <= max_threads){ par.threads.x = xDim; par.threads.y = 1; par.threads.z = 1; xD = 1; yD = yDim; zD = zDim; } else{ int count = 0; int dim_tmp = xDim; while (dim_tmp > max_threads){ count++; dim_tmp /= 2; } std::cout << "count is: " << count << '\n'; par.threads.x = dim_tmp; par.threads.y = 1; par.threads.z = 1; xD = pow(2,count); yD = yDim; zD = zDim; } } else if (dimnum == 1){ par.threads.x = xDim; } par.grid.x=xD; par.grid.y=yD; par.grid.z=zD; std::cout << "threads in x are: " << par.threads.x << '\n'; std::cout << "dimensions are: " << par.grid.x << '\t' << par.grid.y << '\t' << par.grid.z << '\n'; }
4d6bc634ba8c674708788f6f47623956be2e63cc.cu
#include "../include/operators.h" #include "../include/split_op.h" #include "../include/kernels.h" #include "../include/dynamic.h" void laplacian(Grid &par, double2 *data, double2* out, int xDim, int yDim, int zDim, double dx, double dy, double dz){ dim3 grid = par.grid; dim3 threads = par.threads; int gsize = xDim * yDim * zDim; double2 *temp_derivative; cudaHandleError( cudaMalloc((void **) &temp_derivative, sizeof(double2)*gsize) ); derive<<<grid, threads>>>(data, temp_derivative, 1, gsize, dx); cudaCheckError(); derive<<<grid, threads>>>(temp_derivative, temp_derivative, 1, gsize, dx); cudaCheckError(); copy<<<grid, threads>>>(temp_derivative, out); cudaCheckError(); derive<<<grid, threads>>>(data, temp_derivative, xDim, gsize, dy); cudaCheckError(); derive<<<grid, threads>>>(temp_derivative, temp_derivative, xDim, gsize, dy); cudaCheckError(); sum<<<grid, threads>>>(temp_derivative, out, out); cudaCheckError(); derive<<<grid, threads>>>(data, temp_derivative, xDim*yDim, gsize, dz); cudaCheckError(); derive<<<grid, threads>>>(temp_derivative, temp_derivative, xDim*yDim, gsize, dz); cudaCheckError(); sum<<<grid, threads>>>(temp_derivative, out, out); cudaCheckError(); cudaHandleError( cudaFree(temp_derivative) ); } void laplacian(Grid &par, double2 *data, double2* out, int xDim, int yDim, double dx, double dy){ dim3 grid = par.grid; dim3 threads = par.threads; int gsize = xDim * yDim; double2 *temp_derivative; cudaHandleError( cudaMalloc((void **) &temp_derivative, sizeof(double2)*gsize) ); derive<<<grid, threads>>>(data, temp_derivative, 1, gsize, dx); cudaCheckError(); derive<<<grid, threads>>>(temp_derivative, temp_derivative, 1, gsize, dx); cudaCheckError(); copy<<<grid, threads>>>(temp_derivative, out); cudaCheckError(); derive<<<grid, threads>>>(data, temp_derivative, xDim, gsize, dy); cudaCheckError(); derive<<<grid, threads>>>(temp_derivative, temp_derivative, xDim, gsize, dy); cudaCheckError(); sum<<<grid, threads>>>(temp_derivative, out, out); cudaCheckError(); cudaHandleError( cudaFree(temp_derivative) ); } void laplacian(Grid &par, double2 *data, double2* out, int xDim, double dx){ dim3 grid = par.grid; dim3 threads = par.threads; int gsize = xDim; derive<<<grid, threads>>>(data, out, 1, gsize, dx); cudaCheckError(); derive<<<grid, threads>>>(out, out, 1, gsize, dx); cudaCheckError(); } double sign(double x){ if (x < 0){ return -1.0; } else if (x == 0){ return 0.0; } else{ return 1.0; } } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. 
double *curl2d(Grid &par, double *Ax, double *Ay){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int size = sizeof(double) * xDim * yDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < yDim-1; i++){ for (int j = 0; j < xDim-1; j++){ index = j + xDim * i; curl[index] = (Ay[index] - Ay[index+1]) - (Ax[index] - Ax[index+yDim]); } } return curl; } double *curl3d_r(Grid &par, double *Bx, double *By){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)malloc(size); for (int i = 0; i < xDim*yDim*zDim; ++i){ curl[i] = sqrt(Bx[i]*Bx[i] + By[i] * By[i]); } return curl; } double *curl3d_phi(Grid &par, double *Bx, double *By){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)malloc(size); for (int i = 0; i < xDim*yDim*zDim; ++i){ curl[i] = atan2(By[i], Bx[i])+M_PI; } return curl; } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. // Not complete yet! double *curl3d_x(Grid &par, double *Ax, double *Ay, double *Az){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < zDim-1; i++){ for (int j = 0; j < yDim-1; j++){ for (int k = 0; k < xDim-1; k++){ index = k + xDim * j + xDim * yDim * i; curl[index] = (Az[index] - Az[index + zDim]) -(Ay[index] - Ay[index + zDim*yDim]); } } } return curl; } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. // Not complete yet! double *curl3d_y(Grid &par, double *Ax, double *Ay, double *Az){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < zDim-1; i++){ for (int j = 0; j < yDim-1; j++){ for (int k = 0; k < xDim - 1; k++){ index = k + xDim * j + xDim * yDim * i; curl[index] = -(Az[index] - Az[index + 1]) -(Ax[index] - Ax[index + xDim*yDim]); } } } return curl; } // Function to take the curl of Ax and Ay in 2d // note: This is on the cpu, there should be a GPU version too. // Not complete yet! 
double *curl3d_z(Grid &par, double *Ax, double *Ay, double *Az){ int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int size = sizeof(double) * xDim * yDim * zDim; double *curl; curl = (double *)calloc(size, sizeof(double)); int index; // Note: To take the curl, we need a change in x and y to create a dx or dy // For this reason, we have added yDim to y and 1 to x for (int i = 0; i < zDim-1; i++){ for (int j = 0; j < yDim-1; j++){ for (int k = 0; k < xDim-1; k++){ index = k + xDim * j + xDim * yDim * i; curl[index] = (Ay[index] - Ay[index + 1]) -(Ax[index] - Ax[index + xDim]); } } } return curl; } // Function to check whether a file exists std::string filecheck(std::string filename){ struct stat buffer = {0}; if (stat(filename.c_str(), &buffer) == -1){ std::cout << "File " << filename << " does not exist!" << '\n'; std::cout << "Please select a new file:" << '\n'; std::cin >> filename; filename = filecheck(filename); } return filename; } // Function to read Ax from file. // Note that this comes with a special method in init... void file_A(std::string filename, double *A, double omega){ std::fstream infile(filename, std::ios_base::in); double inval; int count = 0; while (infile >> inval){ A[count] = omega*inval; count++; } } /*----------------------------------------------------------------------------// * GPU KERNELS *-----------------------------------------------------------------------------*/ // Function to generate momentum grids void generate_p_space(Grid &par){ int dimnum = par.ival("dimnum"); int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); double xMax = par.dval("xMax"); double yMax = 0; if (dimnum > 1){ yMax = par.dval("yMax"); } double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } double pxMax = par.dval("pxMax"); double pyMax = 0; if (dimnum > 1){ pyMax = par.dval("pyMax"); } double pzMax = 0; if (dimnum == 3){ pzMax = par.dval("pzMax"); } double dx = par.dval("dx"); double dy = 0; if (dimnum > 1){ dy = par.dval("dy"); } double dz = 0; if (dimnum == 3){ dz = par.dval("dz"); } double dpx = par.dval("dpx"); double dpy = 0; if (dimnum > 1){ dpy = par.dval("dpy"); } double dpz = 0; if (dimnum == 3){ dpz = par.dval("dpz"); } double *x, *y, *z, *px, *py, *pz, *x_gpu, *y_gpu, *z_gpu, *px_gpu, *py_gpu, *pz_gpu; x = (double *) malloc(sizeof(double) * xDim); y = (double *) malloc(sizeof(double) * yDim); z = (double *) malloc(sizeof(double) * zDim); px = (double *) malloc(sizeof(double) * xDim); py = (double *) malloc(sizeof(double) * yDim); pz = (double *) malloc(sizeof(double) * zDim); if (dimnum == 2){ for(int i=0; i<xDim/2; ++i){ x[i] = -xMax + i*dx; x[i + (xDim/2)] = i*dx; px[i] = i*dpx; px[i + (xDim/2)] = -pxMax + i*dpx; } for(int i=0; i<yDim/2; ++i){ y[i] = -yMax + i*dy; y[i + (yDim/2)] = i*dy; py[i] = i*dpy; py[i + (yDim/2)] = -pyMax + i*dpy; } for(int i = 0; i < zDim; ++i){ z[i] = 0; pz[i] = 0; } } else if(dimnum == 3){ for(int i=0; i<xDim/2; ++i){ x[i] = -xMax + i*dx; x[i + (xDim/2)] = i*dx; px[i] = i*dpx; px[i + (xDim/2)] = -pxMax + i*dpx; } for(int i=0; i<yDim/2; ++i){ y[i] = -yMax + i*dy; y[i + (yDim/2)] = i*dy; py[i] = i*dpy; py[i + (yDim/2)] = -pyMax + i*dpy; } for(int i=0; i<zDim/2; ++i){ z[i] = -zMax + i*dz; z[i + (zDim/2)] = i*dz; pz[i] = i*dpz; pz[i + (zDim/2)] = -pzMax + i*dpz; } } else if (dimnum == 1){ for(int i=0; i<xDim/2; ++i){ x[i] = -xMax + i*dx; x[i + (xDim/2)] = i*dx; px[i] = i*dpx; px[i + (xDim/2)] = -pxMax + i*dpx; } for(int i = 0; i < zDim; ++i){ z[i] = 0; pz[i] = 0; y[i] = 
0; py[i] = 0; } } par.store("x",x); par.store("y",y); par.store("z",z); par.store("px",px); par.store("py",py); par.store("pz",pz); // Now move these items to the gpu cudaHandleError( cudaMalloc((void**) &x_gpu, sizeof(double) * xDim) ); cudaHandleError( cudaMalloc((void**) &y_gpu, sizeof(double) * yDim) ); cudaHandleError( cudaMalloc((void**) &z_gpu, sizeof(double) * zDim) ); cudaHandleError( cudaMalloc((void**) &px_gpu, sizeof(double) * xDim) ); cudaHandleError( cudaMalloc((void**) &py_gpu, sizeof(double) * yDim) ); cudaHandleError( cudaMalloc((void**) &pz_gpu, sizeof(double) * zDim) ); cudaHandleError( cudaMemcpy(x_gpu, x, sizeof(double)*xDim, cudaMemcpyHostToDevice) ); cudaHandleError( cudaMemcpy(y_gpu, y, sizeof(double)*yDim, cudaMemcpyHostToDevice) ); cudaHandleError( cudaMemcpy(z_gpu, z, sizeof(double)*zDim, cudaMemcpyHostToDevice) ); cudaHandleError( cudaMemcpy(px_gpu, px, sizeof(double)*xDim, cudaMemcpyHostToDevice) ); cudaHandleError( cudaMemcpy(py_gpu, py, sizeof(double)*yDim, cudaMemcpyHostToDevice) ); cudaHandleError( cudaMemcpy(pz_gpu, pz, sizeof(double)*zDim, cudaMemcpyHostToDevice) ); par.store("x_gpu",x_gpu); par.store("y_gpu",y_gpu); par.store("z_gpu",z_gpu); par.store("px_gpu",px_gpu); par.store("py_gpu",py_gpu); par.store("pz_gpu",pz_gpu); } // This function is basically a wrapper to call the appropriate K kernel void generate_K(Grid &par){ // For k, we need xp, yp, and zp. These will also be used in generating // pAxyz parameters, so it should already be stored in par. double *px_gpu = par.dsval("px_gpu"); double *py_gpu = par.dsval("py_gpu"); double *pz_gpu = par.dsval("pz_gpu"); double gSize = par.ival("gSize"); double mass = par.dval("mass"); // Creating K to work with double *K, *K_gpu; K = (double*)malloc(sizeof(double)*gSize); cudaHandleError( cudaMalloc((void**) &K_gpu, sizeof(double)*gSize) ); simple_K<<<par.grid, par.threads>>>(px_gpu, py_gpu, pz_gpu, mass, K_gpu); cudaCheckError(); cudaHandleError( cudaMemcpy(K, K_gpu, sizeof(double)*gSize, cudaMemcpyDeviceToHost) ); par.store("K",K); par.store("K_gpu",K_gpu); } // Simple kernel for generating K __global__ void simple_K(double *xp, double *yp, double *zp, double mass, double *K){ unsigned int gid = getGid3d3d(); unsigned int xid = blockDim.x*blockIdx.x + threadIdx.x; unsigned int yid = blockDim.y*blockIdx.y + threadIdx.y; unsigned int zid = blockDim.z*blockIdx.z + threadIdx.z; K[gid] = (HBAR*HBAR/(2*mass))*(xp[xid]*xp[xid] + yp[yid]*yp[yid] + zp[zid]*zp[zid]); } // Function to generate game fields void generate_gauge(Grid &par){ int gSize = par.ival("gSize"); int dimnum = par.ival("dimnum"); double *Ax, *Ay, *Az, *Ax_gpu, *Ay_gpu, *Az_gpu; double *x_gpu = par.dsval("x_gpu"); double *y_gpu = par.dsval("y_gpu"); double *z_gpu = par.dsval("z_gpu"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 1; if (dimnum == 3){ zMax = par.dval("zMax"); } double omegaX = par.dval("omegaX"); double omegaY = par.dval("omegaY"); double omegaZ; if (dimnum == 3){ omegaZ = par.dval("omegaZ"); } double omega = par.dval("omega"); double fudge = par.dval("fudge"); Ax = (double *)malloc(sizeof(double)*gSize); Ay = (double *)malloc(sizeof(double)*gSize); Az = (double *)malloc(sizeof(double)*gSize); cudaHandleError( cudaMalloc((void**) &Ax_gpu, sizeof(double)*gSize) ); cudaHandleError( cudaMalloc((void**) &Ay_gpu, sizeof(double)*gSize) ); cudaHandleError( cudaMalloc((void**) &Az_gpu, sizeof(double)*gSize) ); if (par.Afn == "file"){ file_A(par.Axfile, Ax, omega); cudaHandleError( cudaMemcpy(Ax_gpu, Ax, 
sizeof(double)*gSize, cudaMemcpyHostToDevice) ); if (dimnum > 1){ file_A(par.Ayfile, Ay, omega); cudaHandleError( cudaMemcpy(Ay_gpu,Ay,sizeof(double)*gSize,cudaMemcpyHostToDevice) ); } if (dimnum == 3){ file_A(par.Azfile, Az, omega); cudaHandleError( cudaMemcpy(Az_gpu,Az,sizeof(double)*gSize,cudaMemcpyHostToDevice) ); } std::cout << "finished reading Ax / Ay / Az from file" << '\n'; } else{ if (par.is_ast_gpu("Ax")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("Ax"); find_field<<<par.grid, par.threads>>>(Ax_gpu, dx, dy, dz, xMax, yMax, zMax, 0, eqn); cudaCheckError(); } else{ par.Ax_fn<<<par.grid, par.threads>>>(x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Ax_gpu); cudaCheckError(); } if (par.is_ast_gpu("Ay")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("Ay"); find_field<<<par.grid, par.threads>>>(Ay_gpu, dx, dy, dz, xMax, yMax, zMax , 0, eqn); cudaCheckError(); } else{ par.Ay_fn<<<par.grid, par.threads>>>(x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Ay_gpu); cudaCheckError(); } if (dimnum == 3){ if (par.is_ast_gpu("Az")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("Az"); find_field<<<par.grid, par.threads>>>(Az_gpu, dx, dy, dz, xMax, yMax, zMax, 0, eqn); cudaCheckError(); } else{ par.Az_fn<<<par.grid, par.threads>>>(x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Az_gpu); cudaCheckError(); } } else{ kconstant_A<<<par.grid, par.threads>>>(x_gpu, y_gpu, z_gpu, xMax, yMax, zMax, omegaX, omegaY, omegaZ, omega, fudge, Az_gpu); cudaCheckError(); } } cudaHandleError( cudaMemcpy(Ax, Ax_gpu, sizeof(double)*gSize,cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(Ay, Ay_gpu, sizeof(double)*gSize,cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(Az, Az_gpu, sizeof(double)*gSize,cudaMemcpyDeviceToHost) ); par.store("Ax", Ax); par.store("Ay", Ay); par.store("Az", Az); par.store("Ax_gpu", Ax_gpu); par.store("Ay_gpu", Ay_gpu); par.store("Az_gpu", Az_gpu); } // constant Kernel A __global__ void kconstant_A(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); A[gid] = 0; } // Kernel for simple rotational case, Ax __global__ void krotation_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int yid = blockDim.y*blockIdx.y + threadIdx.y; A[gid] = -y[yid] * omega * omegaX; } // Kernel for simple rotational case, Ay __global__ void krotation_Ay(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; A[gid] = x[xid] * omega * omegaY; } // Kernel for simple rotational case, Ax __global__ void 
kring_rotation_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double theta = atan2(y[yid],x[xid]); A[gid] = (z[zid]+zMax)*cos(theta)*omega*omegaX; } // Kernel for simple rotational case, Ay __global__ void kring_rotation_Ay(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double theta = atan2(y[yid],x[xid]); A[gid] = (z[zid]+zMax)*sin(theta)*omega*omegaX; } // Kernel for simple rotational case, Az __global__ void kring_rotation_Az(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; double r = sqrt(x[xid]*x[xid] + y[yid]*y[yid]); A[gid] = r*omega*omegaX; } // kernel for a simple vortex ring __global__ void kring_Az(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; double rad = sqrt(x[xid]*x[xid] + y[yid]*y[yid]); A[gid] = omega * exp(-rad*rad / (0.0001*xMax)) * 0.01; } // testing kernel Ax __global__ void ktest_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); int yid = blockDim.y*blockIdx.y + threadIdx.y; A[gid] = (sin(y[yid] * 100000)+1) * yMax * omega; } // testing kernel Ay __global__ void ktest_Ay(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){ int gid = getGid3d3d(); A[gid] = 0; } // function to generate V void generate_fields(Grid &par){ generate_p_space(par); generate_K(par); generate_gauge(par); int gSize = par.ival("gSize"); int dimnum = par.ival("dimnum"); int winding = par.dval("winding"); bool energy_calc = par.bval("energy_calc"); double dt = par.dval("dt"); double gdt = par.dval("gdt"); double *x_gpu = par.dsval("x_gpu"); double *y_gpu = par.dsval("y_gpu"); double *z_gpu = par.dsval("z_gpu"); double *px_gpu = par.dsval("px_gpu"); double *py_gpu = par.dsval("py_gpu"); double *pz_gpu = par.dsval("pz_gpu"); double *Ax_gpu = par.dsval("Ax_gpu"); double *Ay_gpu = par.dsval("Ay_gpu"); double *Az_gpu = par.dsval("Az_gpu"); double *K_gpu = par.dsval("K_gpu"); // Creating items list for kernels double *items, *items_gpu; int item_size = 18; items = (double*)malloc(sizeof(double)*item_size); cudaHandleError( cudaMalloc((void**) &items_gpu, sizeof(double)*item_size) ); for (int i = 0; i < item_size; ++i){ items[i] = 0; } items[0] = par.dval("xMax"); items[1] = par.dval("yMax"); if (dimnum == 3){ items[2] = par.dval("zMax"); } items[3] = par.dval("omegaX"); items[4] = par.dval("omegaY"); if (dimnum == 3){ items[5] = par.dval("omegaZ"); } items[6] = par.dval("x0_shift"); 
items[7] = par.dval("y0_shift"); if (dimnum == 3){ items[8] = par.dval("z0_shift"); } else{ items[8] = 0.0; } items[9] = par.dval("mass"); items[10] = par.dval("gammaY"); items[11] = 1.0; // For gammaZ items[12] = par.dval("fudge"); items[13] = 0.0; // For time items[14] = par.dval("Rxy"); items[15] = par.dval("a0x"); items[16] = par.dval("a0y"); if (dimnum == 3){ items[17] = par.dval("a0z"); } else{ items[17] = 1.0; } cudaHandleError( cudaMemcpy(items_gpu, items, sizeof(double)*item_size, cudaMemcpyHostToDevice) ); double fudge = par.dval("fudge"); // Generating V double *V, *V_gpu; V = (double *)malloc(sizeof(double)*gSize); cudaMalloc((void **) &V_gpu, sizeof(double)*gSize); if (par.is_ast_gpu("V")){ double dx = par.dval("dx"); double dy = par.dval("dy"); double dz = par.dval("dz"); double xMax = par.dval("xMax"); double yMax = par.dval("yMax"); double zMax = 0; if (dimnum == 3){ zMax = par.dval("zMax"); } EqnNode_gpu *eqn = par.astval("V"); find_field<<<par.grid, par.threads>>>(V_gpu, dx, dy, dz, xMax, yMax, zMax, 0, eqn); cudaCheckError(); } else{ par.V_fn<<<par.grid, par.threads>>>(x_gpu, y_gpu, z_gpu, items_gpu, Ax_gpu, Ay_gpu, Az_gpu, V_gpu); cudaCheckError(); } cudaHandleError( cudaMemcpy(V, V_gpu, sizeof(double)*gSize, cudaMemcpyDeviceToHost) ); // Generating wfc double2 *wfc, *wfc_gpu; double *phi, *phi_gpu; wfc = (double2 *)malloc(sizeof(double2)*gSize); phi = (double *)malloc(sizeof(double)*gSize); cudaHandleError( cudaMalloc((void**) &wfc_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &phi_gpu, sizeof(double)*gSize) ); if (par.bval("read_wfc")){ wfc = par.cufftDoubleComplexval("wfc"); cudaHandleError( cudaMemcpy(wfc_gpu, wfc, sizeof(double2)*gSize, cudaMemcpyHostToDevice) ); } else{ par.wfc_fn<<<par.grid, par.threads>>>(x_gpu, y_gpu, z_gpu, items_gpu, winding, phi_gpu, wfc_gpu); cudaCheckError(); cudaHandleError( cudaMemcpy(wfc, wfc_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); } cudaHandleError( cudaMemcpy(phi, phi_gpu, sizeof(double)*gSize, cudaMemcpyDeviceToHost) ); // generating aux fields. 
double2 *GV, *EV, *GK, *EK; double2 *GV_gpu, *EV_gpu, *GK_gpu, *EK_gpu; double2 *GpAx, *GpAy, *GpAz, *EpAx, *EpAy, *EpAz; double2 *GpAx_gpu, *GpAy_gpu, *GpAz_gpu, *EpAx_gpu, *EpAy_gpu, *EpAz_gpu; double *pAx, *pAy, *pAz; double *pAx_gpu, *pAy_gpu, *pAz_gpu; GV = (double2 *)malloc(sizeof(double2)*gSize); EV = (double2 *)malloc(sizeof(double2)*gSize); GK = (double2 *)malloc(sizeof(double2)*gSize); EK = (double2 *)malloc(sizeof(double2)*gSize); GpAx = (double2 *)malloc(sizeof(double2)*gSize); EpAx = (double2 *)malloc(sizeof(double2)*gSize); GpAy = (double2 *)malloc(sizeof(double2)*gSize); EpAy = (double2 *)malloc(sizeof(double2)*gSize); GpAz = (double2 *)malloc(sizeof(double2)*gSize); EpAz = (double2 *)malloc(sizeof(double2)*gSize); pAx = (double *)malloc(sizeof(double)*gSize); pAy = (double *)malloc(sizeof(double)*gSize); pAz = (double *)malloc(sizeof(double)*gSize); cudaHandleError( cudaMalloc((void**) &GV_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &EV_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &GK_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &EK_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &GpAx_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &EpAx_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &GpAy_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &EpAy_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &GpAz_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &EpAz_gpu, sizeof(double2)*gSize) ); cudaHandleError( cudaMalloc((void**) &pAx_gpu, sizeof(double)*gSize) ); cudaHandleError( cudaMalloc((void**) &pAy_gpu, sizeof(double)*gSize) ); cudaHandleError( cudaMalloc((void**) &pAz_gpu, sizeof(double)*gSize) ); aux_fields<<<par.grid, par.threads>>>(V_gpu, K_gpu, gdt, dt, Ax_gpu, Ay_gpu, Az_gpu, px_gpu, py_gpu, pz_gpu, pAx_gpu, pAy_gpu, pAz_gpu, GV_gpu, EV_gpu, GK_gpu, EK_gpu, GpAx_gpu, GpAy_gpu, GpAz_gpu, EpAx_gpu, EpAy_gpu, EpAz_gpu); cudaCheckError(); cudaHandleError( cudaMemcpy(GV, GV_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(EV, EV_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(GK, GK_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(EK, EK_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(GpAx, GpAx_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(EpAx, EpAx_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(GpAy, GpAy_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(EpAy, EpAy_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(GpAz, GpAz_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(EpAz, EpAz_gpu, sizeof(double2)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(pAx, pAx_gpu, sizeof(double)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(pAy, pAy_gpu, sizeof(double)*gSize, cudaMemcpyDeviceToHost) ); cudaHandleError( cudaMemcpy(pAz, pAz_gpu, sizeof(double)*gSize, cudaMemcpyDeviceToHost) ); // Storing variables cudaHandleError( cudaFree(items_gpu) ); //cudaFree(phi_gpu); cudaHandleError( cudaFree(GV_gpu) ); cudaHandleError( cudaFree(EV_gpu) ); cudaHandleError( cudaFree(GK_gpu) ); cudaHandleError( cudaFree(EK_gpu) ); cudaHandleError( cudaFree(pAx_gpu) ); cudaHandleError( 
cudaFree(pAy_gpu) ); cudaHandleError( cudaFree(pAz_gpu) ); cudaHandleError( cudaFree(GpAx_gpu) ); cudaHandleError( cudaFree(GpAy_gpu) ); cudaHandleError( cudaFree(GpAz_gpu) ); cudaHandleError( cudaFree(EpAx_gpu) ); cudaHandleError( cudaFree(EpAy_gpu) ); cudaHandleError( cudaFree(EpAz_gpu) ); cudaHandleError( cudaFree(x_gpu) ); cudaHandleError( cudaFree(y_gpu) ); cudaHandleError( cudaFree(z_gpu) ); cudaHandleError( cudaFree(px_gpu) ); cudaHandleError( cudaFree(py_gpu) ); cudaHandleError( cudaFree(pz_gpu) ); if (!energy_calc){ cudaHandleError( cudaFree(K_gpu) ); cudaHandleError( cudaFree(V_gpu) ); cudaHandleError( cudaFree(Ax_gpu) ); cudaHandleError( cudaFree(Ay_gpu) ); cudaHandleError( cudaFree(Az_gpu) ); } else{ par.store("V_gpu",V_gpu); } par.store("V",V); par.store("items", items); //par.store("items_gpu", items_gpu); par.store("wfc", wfc); par.store("wfc_gpu", wfc_gpu); par.store("Phi", phi); par.store("Phi_gpu", phi_gpu); par.store("GV",GV); par.store("EV",EV); par.store("GK",GK); par.store("EK",EK); //par.store("GV_gpu",GV_gpu); //par.store("EV_gpu",EV_gpu); //par.store("GK_gpu",GK_gpu); //par.store("EK_gpu",EK_gpu); par.store("GpAx",GpAx); par.store("EpAx",EpAx); par.store("GpAy",GpAy); par.store("EpAy",EpAy); par.store("GpAz",GpAz); par.store("EpAz",EpAz); par.store("pAx",pAx); par.store("pAy",pAy); par.store("pAz",pAz); //par.store("pAx_gpu",pAx_gpu); //par.store("pAy_gpu",pAy_gpu); //par.store("pAz_gpu",pAz_gpu); } __global__ void kharmonic_V(double *x, double *y, double *z, double* items, double *Ax, double *Ay, double *Az, double *V){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double V_x = items[3]*(x[xid]+items[6]); double V_y = items[10]*items[4]*(y[yid]+items[7]); double V_z = items[11]*items[5]*(z[zid]+items[8]); V[gid] = 0.5*items[9]*((V_x*V_x + V_y*V_y + V_z*V_z) + (Ax[gid]*Ax[gid] + Ay[gid]*Ay[gid] + Az[gid]*Az[gid])); } // kernel for simple 3d torus trapping potential __global__ void ktorus_V(double *x, double *y, double *z, double* items, double *Ax, double *Ay, double *Az, double *V){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6]) + (y[yid] - items[7]) * (y[yid] - items[7])) - 0.5*items[0]; double omegaR = (items[3]*items[3] + items[4]*items[4]); double V_tot = (2*items[5]*items[5]*(z[zid] - items[8])*(z[zid] - items[8]) + omegaR*(rad*rad + items[12]*rad*z[zid])); V[gid] = 0.5*items[9]*(V_tot + Ax[gid]*Ax[gid] + Ay[gid]*Ay[gid] + Az[gid]*Az[gid]); } __global__ void kstd_wfc(double *x, double *y, double *z, double *items, double winding, double *phi, double2 *wfc){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; phi[gid] = -fmod(winding*atan2(y[yid], x[xid]),2*PI); wfc[gid].x = exp(-(x[xid]*x[xid]/(items[14]*items[14]*items[15]*items[15]) + y[yid]*y[yid]/(items[14]*items[14]*items[16]*items[16]) + z[zid]*z[zid]/(items[14]*items[14]*items[17]*items[17]))) * cos(phi[gid]); wfc[gid].y = -exp(-(x[xid]*x[xid]/(items[14]*items[14]*items[15]*items[15]) + y[yid]*y[yid]/(items[14]*items[14]*items[16]*items[16]) + z[zid]*z[zid]/(items[14]*items[14]*items[17]*items[17]))) * sin(phi[gid]); } __global__ void ktorus_wfc(double *x, double *y, double *z, double *items, double 
winding, double *phi, double2 *wfc){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6]) + (y[yid] - items[7]) * (y[yid] - items[7])) - 0.5*items[0]; wfc[gid].x = exp(-( pow((rad)/(items[14]*items[15]*0.5),2) + pow((z[zid])/(items[14]*items[17]*0.5),2) ) ); wfc[gid].y = 0.0; } __global__ void aux_fields(double *V, double *K, double gdt, double dt, double* Ax, double *Ay, double* Az, double *px, double *py, double *pz, double* pAx, double* pAy, double* pAz, double2* GV, double2* EV, double2* GK, double2* EK, double2* GpAx, double2* GpAy, double2* GpAz, double2* EpAx, double2* EpAy, double2* EpAz){ int gid = getGid3d3d(); int xid = blockDim.x*blockIdx.x + threadIdx.x; int yid = blockDim.y*blockIdx.y + threadIdx.y; int zid = blockDim.z*blockIdx.z + threadIdx.z; GV[gid].x = exp(-V[gid]*(gdt/(2*HBAR))); GK[gid].x = exp(-K[gid]*(gdt/HBAR)); GV[gid].y = 0.0; GK[gid].y = 0.0; // Ax and Ay will be calculated here but are used only for // debugging. They may be needed later for magnetic field calc pAx[gid] = Ax[gid] * px[xid]; pAy[gid] = Ay[gid] * py[yid]; pAz[gid] = Az[gid] * pz[zid]; GpAx[gid].x = exp(-pAx[gid]*gdt); GpAx[gid].y = 0; GpAy[gid].x = exp(-pAy[gid]*gdt); GpAy[gid].y = 0; GpAz[gid].x = exp(-pAz[gid]*gdt); GpAz[gid].y = 0; EV[gid].x=cos(-V[gid]*(dt/(2*HBAR))); EV[gid].y=sin(-V[gid]*(dt/(2*HBAR))); EK[gid].x=cos(-K[gid]*(dt/HBAR)); EK[gid].y=sin(-K[gid]*(dt/HBAR)); EpAz[gid].x=cos(-pAz[gid]*dt); EpAz[gid].y=sin(-pAz[gid]*dt); EpAy[gid].x=cos(-pAy[gid]*dt); EpAy[gid].y=sin(-pAy[gid]*dt); EpAx[gid].x=cos(-pAx[gid]*dt); EpAx[gid].y=sin(-pAx[gid]*dt); } // Function to generate grids and treads for 2d and 3d cases void generate_grid(Grid& par){ int dimnum = par.ival("dimnum"); int xDim = par.ival("xDim"); int yDim = par.ival("yDim"); int zDim = par.ival("zDim"); int xD = 1, yD = 1, zD = 1; int max_threads = 256; if (xDim < max_threads){ max_threads = xDim; } if (dimnum == 2){ if (xDim <= max_threads){ par.threads.x = xDim; par.threads.y = 1; par.threads.z = 1; xD = 1; yD = yDim; zD = 1; } else{ int count = 0; int dim_tmp = xDim; while (dim_tmp > max_threads){ count++; dim_tmp /= 2; } std::cout << "count is: " << count << '\n'; par.threads.x = dim_tmp; par.threads.y = 1; par.threads.z = 1; xD = pow(2,count); yD = yDim; zD = 1; } } else if (dimnum == 3){ if (xDim <= max_threads){ par.threads.x = xDim; par.threads.y = 1; par.threads.z = 1; xD = 1; yD = yDim; zD = zDim; } else{ int count = 0; int dim_tmp = xDim; while (dim_tmp > max_threads){ count++; dim_tmp /= 2; } std::cout << "count is: " << count << '\n'; par.threads.x = dim_tmp; par.threads.y = 1; par.threads.z = 1; xD = pow(2,count); yD = yDim; zD = zDim; } } else if (dimnum == 1){ par.threads.x = xDim; } par.grid.x=xD; par.grid.y=yD; par.grid.z=zD; std::cout << "threads in x are: " << par.threads.x << '\n'; std::cout << "dimensions are: " << par.grid.x << '\t' << par.grid.y << '\t' << par.grid.z << '\n'; }
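
The two entries above are the same field-generation source (a GPUE-style split-operator code) in its hipify output (.hip) and its original CUDA form (.cu): the device kernels are identical, and the host side differs only in API spellings (cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree) and in launch syntax, where the triple-chevron launch becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...). The short, self-contained sketch below illustrates only that mapping; the scale kernel, sizes, and buffer names are hypothetical and are not taken from either file.

// Hypothetical example: CUDA -> HIP launch/API translation pattern seen in the pair above.
#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void scale(double *data, double factor, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) data[gid] *= factor;
}

int main() {
    const int n = 256;
    double *data_gpu;
    hipMalloc((void **) &data_gpu, sizeof(double) * n);   // CUDA: cudaMalloc(...)
    hipMemset(data_gpu, 0, sizeof(double) * n);           // CUDA: cudaMemset(...)
    // CUDA form: scale<<<dim3(1), dim3(n)>>>(data_gpu, 2.0, n);
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, data_gpu, 2.0, n);
    hipDeviceSynchronize();                               // CUDA: cudaDeviceSynchronize()
    hipFree(data_gpu);                                    // CUDA: cudaFree(...)
    printf("done\n");
    return 0;
}

Note that the generated .hip file keeps explicit dim3(...) wrappers and the trailing 0, 0 shared-memory/stream arguments because hipLaunchKernelGGL takes no defaulted launch parameters.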
4d40cd1537b58b3d710ff2f48ed25e2e68c8ab53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <exceptions/cuda_exception.h> #include <execution/LaunchContext.h> #include <helpers/DebugHelper.h> #include <loops/legacy_ops.h> #include <loops/reduce_long.h> #include <loops/scalar.h> #include <system/op_boilerplate.h> #include <types/types.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void simpleReduce(const void *x, const sd::LongType *outerXTadShapeInfo, const sd::LongType *innerXTadShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const sd::LongType *zShapeInfo) { functions::reduce::ReduceLongFunction<X, Z>::template transformCudaXD<OpType>( x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_DEVICE void reduceScalarGeneric(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>( x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void simpleScalar(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ReduceLongFunction<X, Z>::aggregatePartials(void *vsPartials, sd::LongType tid, sd::LongType numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto sPartials = reinterpret_cast<Z *>(vsPartials); auto extraParams = reinterpret_cast<X *>(vextraParams); sd::LongType floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ReduceLongFunction<X, Z>::transformCudaXD(const void *vx, const sd::LongType *outerXTadShapeInfo, const sd::LongType *innerXTadShapeInfo, void *vextraParams, void *vreductionBuffer, void *vz, const sd::LongType *zShapeInfo) { auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); auto extraParams = reinterpret_cast<X *>(vextraParams); // shared memory space for storing intermediate results __shared__ Z sPartials[SD_CUDA_BLOCK_SIZE]; __shared__ int tadLen, numTads; __shared__ bool sameOffsets; if (threadIdx.x == 0) { sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo); tadLen = shape::length(innerXTadShapeInfo); numTads = shape::length(outerXTadShapeInfo); } __syncthreads(); int coords[SD_MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { shape::index2coords(r, outerXTadShapeInfo, coords); const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords); const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords); const X *xTad = x + outerOffset; sPartials[threadIdx.x] = OpType::startingValue(xTad); for (int i = threadIdx.x; i < tadLen; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams); __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLen aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLen), extraParams); __syncthreads(); if (threadIdx.x == 0) z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ReduceLongFunction<X, Z>::execScalarCuda(const void *vx, const sd::LongType *xShapeInfo, void *vextraParams, void *vz, const sd::LongType *zShapeInfo, void *vreductionBuffer, const sd::LongType *tadOnlyShapeInfo) { auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); auto extraParams = reinterpret_cast<X *>(vextraParams); auto reductionBuffer = reinterpret_cast<Z *>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; // shared memory space for storing intermediate results __shared__ Z sPartials[SD_CUDA_BLOCK_SIZE]; __shared__ sd::LongType xEws; __shared__ sd::LongType len; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update( sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0]; // this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } else { if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_HOST void ReduceLongFunction<X, Z>::intermediateXD(dim3 launchDims, hipStream_t *stream, const void *x, const sd::LongType *dXShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const sd::LongType *dZShapeInfo, const sd::LongType *hZShapeInfo, const int *dims) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x))); auto res = hipMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream); if 
(res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res); auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer(); // scalar assign functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr); } else { const int zRank = shape::rank(hZShapeInfo); const int tadRank = shape::rank(hXShapeInfo) - zRank; auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank); auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims + zRank, tadRank); hipLaunchKernelGGL(( simpleReduce<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, reinterpret_cast<sd::LongType const *>(outerPack.special()), reinterpret_cast<sd::LongType const *>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_HOST void ReduceLongFunction<X, Z>::intermediateScalar(dim3 launchDims, hipStream_t *stream, const void *x, const sd::LongType *xShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, const sd::LongType *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x))); auto res = hipMemcpyAsync(z, &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res); } else { hipLaunchKernelGGL(( simpleScalar<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_HOST void ReduceLongFunction<X, Y>::execReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const sd::LongType *xShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, const sd::LongType *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_LONG_OPS)); sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) 
failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_HOST void ReduceLongFunction<X, Y>::execReduceXD(dim3 launchDims, hipStream_t *stream, const int opNum, const void *x, const sd::LongType *dXShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const sd::LongType *dZShapeInfo, const sd::LongType *hZShapeInfo, const int *dims) { if (shape::length(hZShapeInfo) == 1) { ReduceLongFunction<X, Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr); } else { DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), OPS_A(REDUCE_LONG_OPS)); } DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> SD_DEVICE void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *)*sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_DOUBLE_TEMPLATE(template class ReduceLongFunction, , SD_COMMON_TYPES, SD_LONG_TYPES); } // namespace reduce } // namespace functions
4d40cd1537b58b3d710ff2f48ed25e2e68c8ab53.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <exceptions/cuda_exception.h> #include <execution/LaunchContext.h> #include <helpers/DebugHelper.h> #include <loops/legacy_ops.h> #include <loops/reduce_long.h> #include <loops/scalar.h> #include <system/op_boilerplate.h> #include <types/types.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void simpleReduce(const void *x, const sd::LongType *outerXTadShapeInfo, const sd::LongType *innerXTadShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const sd::LongType *zShapeInfo) { functions::reduce::ReduceLongFunction<X, Z>::template transformCudaXD<OpType>( x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_DEVICE void reduceScalarGeneric(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>( x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void simpleScalar(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ReduceLongFunction<X, Z>::aggregatePartials(void *vsPartials, sd::LongType tid, sd::LongType numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto sPartials = reinterpret_cast<Z *>(vsPartials); auto extraParams = reinterpret_cast<X *>(vextraParams); sd::LongType floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ReduceLongFunction<X, Z>::transformCudaXD(const void *vx, const sd::LongType *outerXTadShapeInfo, const sd::LongType *innerXTadShapeInfo, void *vextraParams, void *vreductionBuffer, void *vz, const sd::LongType *zShapeInfo) { auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); auto extraParams = reinterpret_cast<X *>(vextraParams); // shared memory space for storing intermediate results __shared__ Z sPartials[SD_CUDA_BLOCK_SIZE]; __shared__ int tadLen, numTads; __shared__ bool sameOffsets; if (threadIdx.x == 0) { sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo); tadLen = shape::length(innerXTadShapeInfo); numTads = shape::length(outerXTadShapeInfo); } __syncthreads(); int coords[SD_MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { shape::index2coords(r, outerXTadShapeInfo, coords); const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords); const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords); const X *xTad = x + outerOffset; sPartials[threadIdx.x] = OpType::startingValue(xTad); for (int i = threadIdx.x; i < tadLen; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams); __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLen aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLen), extraParams); __syncthreads(); if (threadIdx.x == 0) z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ReduceLongFunction<X, Z>::execScalarCuda(const void *vx, const sd::LongType *xShapeInfo, void *vextraParams, void *vz, const sd::LongType *zShapeInfo, void *vreductionBuffer, const sd::LongType *tadOnlyShapeInfo) { auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); auto extraParams = reinterpret_cast<X *>(vextraParams); auto reductionBuffer = reinterpret_cast<Z *>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; // shared memory space for storing intermediate results __shared__ Z sPartials[SD_CUDA_BLOCK_SIZE]; __shared__ sd::LongType xEws; __shared__ sd::LongType len; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update( sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0]; // this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } else { if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_HOST void ReduceLongFunction<X, Z>::intermediateXD(dim3 launchDims, cudaStream_t *stream, const void *x, const sd::LongType *dXShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const sd::LongType *dZShapeInfo, const sd::LongType *hZShapeInfo, const int *dims) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x))); auto res = cudaMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream); 
if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res); auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer(); // scalar assign functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo, z, dZShapeInfo, hZShapeInfo, ptr, nullptr); } else { const int zRank = shape::rank(hZShapeInfo); const int tadRank = shape::rank(hXShapeInfo) - zRank; auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank); auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims + zRank, tadRank); simpleReduce<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( x, reinterpret_cast<sd::LongType const *>(outerPack.special()), reinterpret_cast<sd::LongType const *>(innerPack.special()), extraParams, vreductionBuffer, z, dZShapeInfo); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_HOST void ReduceLongFunction<X, Z>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, const void *x, const sd::LongType *xShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, const sd::LongType *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x))); auto res = cudaMemcpyAsync(z, &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res); } else { simpleScalar<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_HOST void ReduceLongFunction<X, Y>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const sd::LongType *xShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *z, const sd::LongType *zShapeInfo, const sd::LongType *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_LONG_OPS)); sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) 
failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> SD_HOST void ReduceLongFunction<X, Y>::execReduceXD(dim3 launchDims, cudaStream_t *stream, const int opNum, const void *x, const sd::LongType *dXShapeInfo, const sd::LongType *hXShapeInfo, void *extraParams, void *vreductionBuffer, void *z, const sd::LongType *dZShapeInfo, const sd::LongType *hZShapeInfo, const int *dims) { if (shape::length(hZShapeInfo) == 1) { ReduceLongFunction<X, Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z, dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr); } else { DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z, dZShapeInfo, hZShapeInfo, dims), OPS_A(REDUCE_LONG_OPS)); } DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> SD_DEVICE void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *)*sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_DOUBLE_TEMPLATE(template class ReduceLongFunction, , SD_COMMON_TYPES, SD_LONG_TYPES); } // namespace reduce } // namespace functions
fe5d3b34361a312af2e54ef40ea647d0f81a251a.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <common/err_common.hpp> #include <solve.hpp> #include <platform.hpp> #include <rocblas.h> #include <identity.hpp> #include <memory.hpp> #include <copy.hpp> #include <transpose.hpp> #include <math.hpp> #include <common/err_common.hpp> #include <blas.hpp> #include <lu.hpp> #include <qr.hpp> #include <cstdio> namespace cuda { //cusolverStatus_t cusolverDn<>getrs( // hipsolverDnHandle_t handle, // hipblasOperation_t trans, // int n, int nrhs, // const <> *A, int lda, // const int *devIpiv, // <> *B, int ldb, // int *devInfo ); template<typename T> struct getrs_func_def_t { typedef cusolverStatus_t (*getrs_func_def) ( hipsolverDnHandle_t, hipblasOperation_t, int, int, const T *, int, const int *, T *, int, int *); }; #define SOLVE_FUNC_DEF( FUNC ) \ template<typename T> \ typename FUNC##_func_def_t<T>::FUNC##_func_def \ FUNC##_func(); #define SOLVE_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() \ { return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \ SOLVE_FUNC_DEF( getrs ) SOLVE_FUNC(getrs , float , S) SOLVE_FUNC(getrs , double , D) SOLVE_FUNC(getrs , cfloat , C) SOLVE_FUNC(getrs , cdouble, Z) //cusolverStatus_t cusolverDn<>geqrf_bufferSize( // hipsolverDnHandle_t handle, // int m, int n, // <> *A, // int lda, // int *Lwork ); // //cusolverStatus_t cusolverDn<>geqrf( // hipsolverDnHandle_t handle, // int m, int n, // <> *A, int lda, // <> *TAU, // <> *Workspace, // int Lwork, int *devInfo ); // //cusolverStatus_t cusolverDn<>mqr( // hipsolverDnHandle_t handle, // hipblasSideMode_t side, hipblasOperation_t trans, // int m, int n, int k, // const double *A, int lda, // const double *tau, // double *C, int ldc, // double *work, // int lwork, int *devInfo); template<typename T> struct geqrf_solve_func_def_t { typedef cusolverStatus_t (*geqrf_solve_func_def) ( hipsolverDnHandle_t, int, int, T *, int, T *, T *, int, int *); }; template<typename T> struct geqrf_solve_buf_func_def_t { typedef cusolverStatus_t (*geqrf_solve_buf_func_def) ( hipsolverDnHandle_t, int, int, T *, int, int *); }; template<typename T> struct mqr_solve_func_def_t { typedef cusolverStatus_t (*mqr_solve_func_def) ( hipsolverDnHandle_t, hipblasSideMode_t, hipblasOperation_t, int, int, int, const T *, int, const T *, T *, int, T *, int, int *); }; #define QR_FUNC_DEF( FUNC ) \ template<typename T> \ static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \ FUNC##_solve_func(); \ \ template<typename T> \ static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \ FUNC##_solve_buf_func(); \ #define QR_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def FUNC##_solve_func<TYPE>() \ { return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX##FUNC; } \ \ template<> typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def FUNC##_solve_buf_func<TYPE>() \ { return (FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; } QR_FUNC_DEF( geqrf ) QR_FUNC(geqrf , float , S) QR_FUNC(geqrf , double , D) QR_FUNC(geqrf 
, cfloat , C) QR_FUNC(geqrf , cdouble, Z) #define MQR_FUNC_DEF( FUNC ) \ template<typename T> \ static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \ FUNC##_solve_func(); #define MQR_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \ FUNC##_solve_func<TYPE>() \ { return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX; } \ MQR_FUNC_DEF( mqr ) MQR_FUNC(mqr , float , Sormqr) MQR_FUNC(mqr , double , Dormqr) MQR_FUNC(mqr , cfloat , Cunmqr) MQR_FUNC(mqr , cdouble, Zunmqr) template<typename T> Array<T> solveLU(const Array<T> &A, const Array<int> &pivot, const Array<T> &b, const af_mat_prop options) { int N = A.dims()[0]; int NRHS = b.dims()[1]; Array< T > B = copyArray<T>(b); auto info = memAlloc<int>(1); CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), HIPBLAS_OP_N, N, NRHS, A.get(), A.strides()[1], pivot.get(), B.get(), B.strides()[1], info.get())); return B; } template<typename T> Array<T> generalSolve(const Array<T> &a, const Array<T> &b) { int M = a.dims()[0]; int N = a.dims()[1]; int K = b.dims()[1]; Array<T> A = copyArray<T>(a); Array<T> B = copyArray<T>(b); Array<int> pivot = lu_inplace(A, false); auto info = memAlloc<int>(1); CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), HIPBLAS_OP_N, N, K, A.get(), A.strides()[1], pivot.get(), B.get(), B.strides()[1], info.get())); return B; } template<typename T> hipblasOperation_t trans() { return HIPBLAS_OP_T; } template<> hipblasOperation_t trans<cfloat>() { return HIPBLAS_OP_C; } template<> hipblasOperation_t trans<cdouble>() { return HIPBLAS_OP_C; } template<typename T> Array<T> leastSquares(const Array<T> &a, const Array<T> &b) { int M = a.dims()[0]; int N = a.dims()[1]; int K = b.dims()[1]; Array<T> B = createEmptyArray<T>(dim4()); if (M < N) { // Least squres for this case is solved using the following // solve(A, B) == matmul(Q, Xpad); // Where: // Xpad == pad(Xt, N - M, 1); // Xt == tri_solve(R1, B); // R1 == R(seq(M), seq(M)); // transpose(A) == matmul(Q, R); // QR is performed on the transpose of A Array<T> A = transpose<T>(a, true); B = padArray<T, T>(b, dim4(N, K), scalar<T>(0)); int lwork = 0; // Get workspace needed for QR CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], &lwork)); auto workspace = memAlloc<T>(lwork); Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); auto info = memAlloc<int>(1); // In place Perform in place QR CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], t.get(), workspace.get(), lwork, info.get())); // R1 = R(seq(M), seq(M)); A.resetDims(dim4(M, M)); // Bt = tri_solve(R1, B); B.resetDims(dim4(M, K)); trsm<T>(A, B, AF_MAT_CTRANS, true, true, false); // Bpad = pad(Bt, ..) 
B.resetDims(dim4(N, K)); // matmul(Q, Bpad) CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(), HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N, B.dims()[0], B.dims()[1], A.dims()[0], A.get(), A.strides()[1], t.get(), B.get(), B.strides()[1], workspace.get(), lwork, info.get())); } else if (M > N) { // Least squres for this case is solved using the following // solve(A, B) == tri_solve(R1, Bt); // Where: // R1 == R(seq(N), seq(N)); // Bt == matmul(transpose(Q1), B); // Q1 == Q(span, seq(N)); // A == matmul(Q, R); Array<T> A = copyArray<T>(a); B = copyArray(b); int lwork = 0; // Get workspace needed for QR CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], &lwork)); auto workspace = memAlloc<T>(lwork); Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); auto info = memAlloc<int>(1); // In place Perform in place QR CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], t.get(), workspace.get(), lwork, info.get())); // matmul(Q1, B) CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(), HIPBLAS_SIDE_LEFT, trans<T>(), M, K, N, A.get(), A.strides()[1], t.get(), B.get(), B.strides()[1], workspace.get(), lwork, info.get())); // tri_solve(R1, Bt) A.resetDims(dim4(N, N)); B.resetDims(dim4(N, K)); trsm(A, B, AF_MAT_NONE, true, true, false); } return B; } template<typename T> Array<T> triangleSolve(const Array<T> &A, const Array<T> &b, const af_mat_prop options) { Array<T> B = copyArray<T>(b); trsm(A, B, AF_MAT_NONE, // transpose flag options & AF_MAT_UPPER ? true : false, true, // is_left options & AF_MAT_DIAG_UNIT ? true : false); return B; } template<typename T> Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options) { if (options & AF_MAT_UPPER || options & AF_MAT_LOWER) { return triangleSolve<T>(a, b, options); } if(a.dims()[0] == a.dims()[1]) { return generalSolve<T>(a, b); } else { return leastSquares<T>(a, b); } } #define INSTANTIATE_SOLVE(T) \ template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \ const af_mat_prop options); \ template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \ const Array<T> &b, const af_mat_prop options); \ INSTANTIATE_SOLVE(float) INSTANTIATE_SOLVE(cfloat) INSTANTIATE_SOLVE(double) INSTANTIATE_SOLVE(cdouble) }
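For reference, the two branches of leastSquares above implement the standard thin-QR identities (this summary is not taken from the ArrayFire sources; it is the textbook derivation the comments allude to):

\[
M > N:\quad A = Q_1 R_1,\qquad \arg\min_X \|AX - B\|_F = R_1^{-1}\,\bigl(Q_1^{H} B\bigr),
\]
\[
M < N:\quad A^{H} = Q R_1,\qquad X_{\text{min-norm}} = Q \begin{bmatrix} R_1^{-H} B \\ 0 \end{bmatrix}.
\]

geqrf produces the factorization, ormqr/unmqr applies \(Q\) or \(Q^{H}\), and trsm performs the triangular solve against \(R_1\), which is exactly the call sequence in the code (with the padded zero block supplying the second half of the stacked vector in the underdetermined case).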
fe5d3b34361a312af2e54ef40ea647d0f81a251a.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <common/err_common.hpp> #include <solve.hpp> #include <platform.hpp> #include <cublas_v2.h> #include <identity.hpp> #include <memory.hpp> #include <copy.hpp> #include <transpose.hpp> #include <math.hpp> #include <common/err_common.hpp> #include <blas.hpp> #include <lu.hpp> #include <qr.hpp> #include <cstdio> namespace cuda { //cusolverStatus_t cusolverDn<>getrs( // cusolverDnHandle_t handle, // cublasOperation_t trans, // int n, int nrhs, // const <> *A, int lda, // const int *devIpiv, // <> *B, int ldb, // int *devInfo ); template<typename T> struct getrs_func_def_t { typedef cusolverStatus_t (*getrs_func_def) ( cusolverDnHandle_t, cublasOperation_t, int, int, const T *, int, const int *, T *, int, int *); }; #define SOLVE_FUNC_DEF( FUNC ) \ template<typename T> \ typename FUNC##_func_def_t<T>::FUNC##_func_def \ FUNC##_func(); #define SOLVE_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() \ { return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \ SOLVE_FUNC_DEF( getrs ) SOLVE_FUNC(getrs , float , S) SOLVE_FUNC(getrs , double , D) SOLVE_FUNC(getrs , cfloat , C) SOLVE_FUNC(getrs , cdouble, Z) //cusolverStatus_t cusolverDn<>geqrf_bufferSize( // cusolverDnHandle_t handle, // int m, int n, // <> *A, // int lda, // int *Lwork ); // //cusolverStatus_t cusolverDn<>geqrf( // cusolverDnHandle_t handle, // int m, int n, // <> *A, int lda, // <> *TAU, // <> *Workspace, // int Lwork, int *devInfo ); // //cusolverStatus_t cusolverDn<>mqr( // cusolverDnHandle_t handle, // cublasSideMode_t side, cublasOperation_t trans, // int m, int n, int k, // const double *A, int lda, // const double *tau, // double *C, int ldc, // double *work, // int lwork, int *devInfo); template<typename T> struct geqrf_solve_func_def_t { typedef cusolverStatus_t (*geqrf_solve_func_def) ( cusolverDnHandle_t, int, int, T *, int, T *, T *, int, int *); }; template<typename T> struct geqrf_solve_buf_func_def_t { typedef cusolverStatus_t (*geqrf_solve_buf_func_def) ( cusolverDnHandle_t, int, int, T *, int, int *); }; template<typename T> struct mqr_solve_func_def_t { typedef cusolverStatus_t (*mqr_solve_func_def) ( cusolverDnHandle_t, cublasSideMode_t, cublasOperation_t, int, int, int, const T *, int, const T *, T *, int, T *, int, int *); }; #define QR_FUNC_DEF( FUNC ) \ template<typename T> \ static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \ FUNC##_solve_func(); \ \ template<typename T> \ static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \ FUNC##_solve_buf_func(); \ #define QR_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def FUNC##_solve_func<TYPE>() \ { return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX##FUNC; } \ \ template<> typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def FUNC##_solve_buf_func<TYPE>() \ { return (FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; } QR_FUNC_DEF( geqrf ) QR_FUNC(geqrf , float , S) QR_FUNC(geqrf , double , D) QR_FUNC(geqrf , cfloat , C) QR_FUNC(geqrf , cdouble, Z) #define MQR_FUNC_DEF( FUNC ) 
\ template<typename T> \ static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \ FUNC##_solve_func(); #define MQR_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \ FUNC##_solve_func<TYPE>() \ { return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX; } \ MQR_FUNC_DEF( mqr ) MQR_FUNC(mqr , float , Sormqr) MQR_FUNC(mqr , double , Dormqr) MQR_FUNC(mqr , cfloat , Cunmqr) MQR_FUNC(mqr , cdouble, Zunmqr) template<typename T> Array<T> solveLU(const Array<T> &A, const Array<int> &pivot, const Array<T> &b, const af_mat_prop options) { int N = A.dims()[0]; int NRHS = b.dims()[1]; Array< T > B = copyArray<T>(b); auto info = memAlloc<int>(1); CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), CUBLAS_OP_N, N, NRHS, A.get(), A.strides()[1], pivot.get(), B.get(), B.strides()[1], info.get())); return B; } template<typename T> Array<T> generalSolve(const Array<T> &a, const Array<T> &b) { int M = a.dims()[0]; int N = a.dims()[1]; int K = b.dims()[1]; Array<T> A = copyArray<T>(a); Array<T> B = copyArray<T>(b); Array<int> pivot = lu_inplace(A, false); auto info = memAlloc<int>(1); CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(), CUBLAS_OP_N, N, K, A.get(), A.strides()[1], pivot.get(), B.get(), B.strides()[1], info.get())); return B; } template<typename T> cublasOperation_t trans() { return CUBLAS_OP_T; } template<> cublasOperation_t trans<cfloat>() { return CUBLAS_OP_C; } template<> cublasOperation_t trans<cdouble>() { return CUBLAS_OP_C; } template<typename T> Array<T> leastSquares(const Array<T> &a, const Array<T> &b) { int M = a.dims()[0]; int N = a.dims()[1]; int K = b.dims()[1]; Array<T> B = createEmptyArray<T>(dim4()); if (M < N) { // Least squres for this case is solved using the following // solve(A, B) == matmul(Q, Xpad); // Where: // Xpad == pad(Xt, N - M, 1); // Xt == tri_solve(R1, B); // R1 == R(seq(M), seq(M)); // transpose(A) == matmul(Q, R); // QR is performed on the transpose of A Array<T> A = transpose<T>(a, true); B = padArray<T, T>(b, dim4(N, K), scalar<T>(0)); int lwork = 0; // Get workspace needed for QR CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], &lwork)); auto workspace = memAlloc<T>(lwork); Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); auto info = memAlloc<int>(1); // In place Perform in place QR CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], t.get(), workspace.get(), lwork, info.get())); // R1 = R(seq(M), seq(M)); A.resetDims(dim4(M, M)); // Bt = tri_solve(R1, B); B.resetDims(dim4(M, K)); trsm<T>(A, B, AF_MAT_CTRANS, true, true, false); // Bpad = pad(Bt, ..) 
B.resetDims(dim4(N, K)); // matmul(Q, Bpad) CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(), CUBLAS_SIDE_LEFT, CUBLAS_OP_N, B.dims()[0], B.dims()[1], A.dims()[0], A.get(), A.strides()[1], t.get(), B.get(), B.strides()[1], workspace.get(), lwork, info.get())); } else if (M > N) { // Least squres for this case is solved using the following // solve(A, B) == tri_solve(R1, Bt); // Where: // R1 == R(seq(N), seq(N)); // Bt == matmul(transpose(Q1), B); // Q1 == Q(span, seq(N)); // A == matmul(Q, R); Array<T> A = copyArray<T>(a); B = copyArray(b); int lwork = 0; // Get workspace needed for QR CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], &lwork)); auto workspace = memAlloc<T>(lwork); Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1)); auto info = memAlloc<int>(1); // In place Perform in place QR CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(), A.dims()[0], A.dims()[1], A.get(), A.strides()[1], t.get(), workspace.get(), lwork, info.get())); // matmul(Q1, B) CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(), CUBLAS_SIDE_LEFT, trans<T>(), M, K, N, A.get(), A.strides()[1], t.get(), B.get(), B.strides()[1], workspace.get(), lwork, info.get())); // tri_solve(R1, Bt) A.resetDims(dim4(N, N)); B.resetDims(dim4(N, K)); trsm(A, B, AF_MAT_NONE, true, true, false); } return B; } template<typename T> Array<T> triangleSolve(const Array<T> &A, const Array<T> &b, const af_mat_prop options) { Array<T> B = copyArray<T>(b); trsm(A, B, AF_MAT_NONE, // transpose flag options & AF_MAT_UPPER ? true : false, true, // is_left options & AF_MAT_DIAG_UNIT ? true : false); return B; } template<typename T> Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options) { if (options & AF_MAT_UPPER || options & AF_MAT_LOWER) { return triangleSolve<T>(a, b, options); } if(a.dims()[0] == a.dims()[1]) { return generalSolve<T>(a, b); } else { return leastSquares<T>(a, b); } } #define INSTANTIATE_SOLVE(T) \ template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \ const af_mat_prop options); \ template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \ const Array<T> &b, const af_mat_prop options); \ INSTANTIATE_SOLVE(float) INSTANTIATE_SOLVE(cfloat) INSTANTIATE_SOLVE(double) INSTANTIATE_SOLVE(cdouble) }
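The square-matrix path above (generalSolve) is LU-factor-then-back-substitute: lu_inplace computes the pivoted LU of A on the device and getrs reuses those factors for every right-hand side. Below is a minimal self-contained cuSOLVER sketch of that flow for single precision; it is an illustration rather than ArrayFire code, error handling and the dInfo check are omitted, and dA/dB are assumed to be column-major device arrays with leading dimension n.

#include <cuda_runtime.h>
#include <cusolverDn.h>

// Solve A * X = B for a dense n x n column-major A and n x nrhs B, both already
// resident on the device. Overwrites dA with its LU factors and dB with X.
void solveSquareLU(float *dA, float *dB, int n, int nrhs) {
    cusolverDnHandle_t handle;
    cusolverDnCreate(&handle);

    int lwork = 0;
    cusolverDnSgetrf_bufferSize(handle, n, n, dA, n, &lwork);

    float *dWork = nullptr;
    int *dPivot = nullptr, *dInfo = nullptr;
    cudaMalloc(&dWork, sizeof(float) * lwork);
    cudaMalloc(&dPivot, sizeof(int) * n);
    cudaMalloc(&dInfo, sizeof(int));

    // P * A = L * U  (pivot indices land in dPivot, singularity flag in dInfo)
    cusolverDnSgetrf(handle, n, n, dA, n, dWork, dPivot, dInfo);
    // Back-substitute both triangular factors for all right-hand sides at once
    cusolverDnSgetrs(handle, CUBLAS_OP_N, n, nrhs, dA, n, dPivot, dB, n, dInfo);

    cudaFree(dWork); cudaFree(dPivot); cudaFree(dInfo);
    cusolverDnDestroy(handle);
}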
fc3192f932d250863fb2ee20655208bb885b5ef3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision$ // $Date$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- #include "cudpp_radixsort.h" #include <cudpp_globals.h> #include "sharedmem.h" #include "cta/radixsort_cta.cu" #ifdef __DEVICE_EMULATION__ #define __EMUSYNC __syncthreads() #else #define __EMUSYNC #endif /** * @file * radixsort_app.cu * * @brief CUDPP kernel-level radix sorting routines */ /** \addtogroup cudpp_kernel * @{ */ /** @name RadixSort Functions * @{ */ typedef unsigned int uint; /** @brief And empty kernel used to reset CTA issue hardware **/ __global__ void emptyKernel() {} /** @brief Does special binary arithmetic before sorting floats * * Uses floatFlip function to flip bits. * @param[in,out] values Values to be manipulated * @param[in] numValues Number of values to be flipped **/ __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) flipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); } /** @brief Undoes the flips from flipFloats * * Uses floatUnflip function to unflip bits. * @param[in,out] values Values to be manipulated * @param[in] numValues Number of values to be unflipped **/ __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) unflipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); } /** @brief Optimization for sorts of WARP_SIZE or fewer elements * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] numElements Number of elements in the sort. 
*/ template <bool flip> __global__ LAUNCH_BOUNDS(WARP_SIZE) void radixSortSingleWarp(uint *keys, uint *values, uint numElements) { volatile __shared__ uint sKeys[WARP_SIZE]; //remove class distinctions volatile __shared__ uint sValues[WARP_SIZE]; volatile __shared__ uint sFlags[WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); sValues[threadIdx.x] = values[threadIdx.x]; __EMUSYNC; // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; uint val_i = sValues[i]; sFlags[threadIdx.x] = 0; uint temp, tempval; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { temp = sKeys[threadIdx.x]; tempval = sValues[threadIdx.x]; sFlags[threadIdx.x] = 1; #ifdef __DEVICE_EMULATION__ } __EMUSYNC; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { #endif sKeys[threadIdx.x + 1] = temp; sValues[threadIdx.x + 1] = tempval; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; sValues[threadIdx.x] = val_i; } __EMUSYNC; // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); values[threadIdx.x] = sValues[threadIdx.x]; } /** @brief Optimization for sorts of WARP_SIZE or fewer elements. Keys-Only version. * * @param[in,out] keys Keys to be sorted * @param[in] numElements Total number of elements to be sorted **/ template <bool flip> __global__ LAUNCH_BOUNDS(WARP_SIZE) void radixSortSingleWarpKeysOnly(uint *keys, uint numElements) { volatile __shared__ uint sKeys[WARP_SIZE]; volatile __shared__ uint sFlags[WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); __EMUSYNC; // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; sFlags[threadIdx.x] = 0; uint temp; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { temp = sKeys[threadIdx.x]; sFlags[threadIdx.x] = 1; #ifdef __DEVICE_EMULATION__ } __EMUSYNC; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { #endif sKeys[threadIdx.x + 1] = temp; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; } __EMUSYNC; // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); } /** @brief sorts all blocks of data independently in shared memory. * Each thread block (CTA) sorts one block of 4*CTA_SIZE elements * * The radix sort is done in two stages. This stage calls radixSortBlock on each * block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size (fullBlocks) * differently than arrays that are not. "flip" is used to only compile in the * float flip code when float keys are used. "loop" is used when persistent CTAs * are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. 
* * @param[out] keysOut Output of sorted keys * @param[out] valuesOut Output of associated values * @param[in] keysIn Input of unsorted keys in GPU * @param[in] valuesIn Input of associated input values * @param[in] numElements Total number of elements to sort * @param[in] totalBlocks The number of blocks of data to sort */ template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) radixSortBlocks(uint4* keysOut, uint4* valuesOut, uint4* keysIn, uint4* valuesIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key, value; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; uint *values1 = (uint*)valuesIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; value.x = (idx < numElements) ? values1[idx] : UINT_MAX; value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX; value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX; value.w = UINT_MAX; } } else { key = keysIn[i]; value = valuesIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlock<nbits, startbit>(key, value); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; uint *values1 = (uint*)valuesOut; keys1[idx] = key.x; values1[idx] = value.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; values1[idx + 1] = value.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; values1[idx + 2] = value.z; } } } } else { keysOut[i] = key; valuesOut[i] = value; } if (loop) blockId += gridDim.x; else break; } } /** @brief Computes the number of keys of each radix in each block stores offset. * * Given an array with blocks sorted according to a 4-bit radix group, each * block counts the number of keys that fall into each radix in the group, and * finds the starting offset of each radix in the block. It then writes the radix * counts to the counters array, and the starting offsets to the blockOffsets array. * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size * (fullBlocks) differently than arrays that are not. "loop" is used when persistent * CTAs are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. 
* * @param[in] keys Input keys * @param[out] counters Radix count for each block * @param[out] blockOffsets The offset address for each block * @param[in] numElements Total number of elements * @param[in] totalBlocks Total number of blocks **/ template<uint startbit, bool fullBlocks, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) findRadixOffsets(uint2 *keys, uint *counters, uint *blockOffsets, uint numElements, uint totalBlocks) { extern __shared__ uint sRadix1[]; __shared__ uint sStartPointers[16]; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint2 radix2; uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && ((i + 1) << 1 ) > numElements ) { // handle uint1 rather than uint2 for non-full blocks uint *keys1 = (uint*)keys; uint j = i << 1; radix2.x = (j < numElements) ? keys1[j] : UINT_MAX; j++; radix2.y = (j < numElements) ? keys1[j] : UINT_MAX; } else { radix2 = keys[i]; } sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF; sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF; // Finds the position where the sRadix1 entries differ and stores start // index for each radix. if(threadIdx.x < 16) { sStartPointers[threadIdx.x] = 0; } __syncthreads(); if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x; } if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]) { sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE]] = threadIdx.x + SORT_CTA_SIZE; } __syncthreads(); if(threadIdx.x < 16) { blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x]; } __syncthreads(); // Compute the sizes of each block. if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x - 1]] = threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]]; } if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1] ) { sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]] = threadIdx.x + SORT_CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]]; } if(threadIdx.x == SORT_CTA_SIZE - 1) { sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]] = 2 * SORT_CTA_SIZE - sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]]; } __syncthreads(); if(threadIdx.x < 16) { counters[threadIdx.x * totalBlocks + blockId] = sStartPointers[threadIdx.x]; } if (loop) blockId += gridDim.x; else break; } } /**@brief Reorders data in the global array. * * reorderData shuffles data in the array globally after the radix * offsets have been found. On compute version 1.1 and earlier GPUs, this code depends * on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). * * On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures * that all writes are coalesced using extra work in the kernel. On later * GPUs coalescing rules have been relaxed, so this extra overhead hurts * performance. On these GPUs we set manualCoalesce=false and directly store * the results. * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size * (fullBlocks) differently than arrays that are not. "loop" is used when persistent * CTAs are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. 
Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. * * @param[out] outKeys Output of sorted keys * @param[out] outValues Output of associated values * @param[in] keys Input of unsorted keys in GPU * @param[in] values Input of associated input values * @param[in] blockOffsets The offset address for each block * @param[in] offsets Address of each radix within each block * @param[in] sizes Number of elements in a block * @param[in] numElements Total number of elements * @param[in] totalBlocks Total number of data blocks to process * * @todo Args that are const below should be prototyped as const **/ template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) reorderData(uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[SORT_CTA_SIZE]; __shared__ uint2 sValues2[SORT_CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint *sValues1 = (uint*)sValues2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint *values1 = (uint*)values; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; sValues2[threadIdx.x] = values[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); outValues[globalOffset] = sValues1[threadIdx.x]; } radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]); outValues[globalOffset] = sValues1[threadIdx.x + SORT_CTA_SIZE]; } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. 
// Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); outValues[outOffset] = sValues1[inOffset]; } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } /** @brief Sorts all blocks of data independently in shared memory. * Each thread block (CTA) sorts one block of 4*CTA_SIZE elements * * The radix sort is done in two stages. This stage calls radixSortBlock on each * block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size (fullBlocks) * differently than arrays that are not. "flip" is used to only compile in the * float flip code when float keys are used. "loop" is used when persistent CTAs * are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. * * @param[out] keysOut Output of sorted keys GPU main memory * @param[in] keysIn Input of unsorted keys in GPU main memory * @param[in] numElements Total number of elements to sort * @param[in] totalBlocks Total number of blocks to sort * */ template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? 
floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; } } else { key = keysIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlockKeysOnly<nbits, startbit>(key); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; keys1[idx] = key.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; } } } } else { keysOut[i] = key; } if (loop) blockId += gridDim.x; else break; } } /** @brief Reorders data in the global array. * * reorderDataKeysOnly shuffles data in the array globally after the radix offsets * have been found. On compute version 1.1 and earlier GPUs, this code depends * on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). * * On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures * that all writes are coalesced using extra work in the kernel. On later * GPUs coalescing rules have been relaxed, so this extra overhead hurts * performance. On these GPUs we set manualCoalesce=false and directly store * the results. * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size * (fullBlocks) differently than arrays that are not. "loop" is used when persistent * CTAs are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. * * @param[out] outKeys Output result of reorderDataKeysOnly() * @param[in] keys Keys to be reordered * @param[in] blockOffsets Start offset for each block * @param[in] offsets Offset of each radix within each block * @param[in] sizes Number of elements in a block * @param[in] numElements Total number of elements * @param[in] totalBlocks Total number of blocks */ template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) reorderDataKeysOnly(uint *outKeys, uint2 *keys, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[SORT_CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? 
keys1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); } radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]); } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. // Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } /** @} */ // end radixsort functions /** @} */ // end cudpp_kernel
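The float path in this file hinges on floatFlip/floatUnflip, which are defined alongside the CTA-level code (radixsort_cta.cu) and not shown here: IEEE-754 keys are remapped to unsigned integers whose natural ordering matches the float ordering, sorted as uints, then mapped back. A minimal sketch of that bit trick, with hypothetical helper names, looks like this:

__host__ __device__ inline unsigned int flipFloatBits(unsigned int f) {
    // Negative floats (sign bit set): flip every bit so "more negative" sorts lower.
    // Non-negative floats: flip only the sign bit so they land above all negatives.
    unsigned int mask = (f & 0x80000000u) ? 0xffffffffu : 0x80000000u;
    return f ^ mask;
}

__host__ __device__ inline unsigned int unflipFloatBits(unsigned int f) {
    // Inverse: after flipping, a set sign bit means the value was originally non-negative.
    unsigned int mask = (f & 0x80000000u) ? 0x80000000u : 0xffffffffu;
    return f ^ mask;
}

__device__ inline unsigned int sortableKey(float f)      { return flipFloatBits(__float_as_uint(f)); }
__device__ inline float       restoreKey(unsigned int k) { return __uint_as_float(unflipFloatBits(k)); }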
fc3192f932d250863fb2ee20655208bb885b5ef3.cu
// ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision$ // $Date$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- #include "cudpp_radixsort.h" #include <cudpp_globals.h> #include "sharedmem.h" #include "cta/radixsort_cta.cu" #ifdef __DEVICE_EMULATION__ #define __EMUSYNC __syncthreads() #else #define __EMUSYNC #endif /** * @file * radixsort_app.cu * * @brief CUDPP kernel-level radix sorting routines */ /** \addtogroup cudpp_kernel * @{ */ /** @name RadixSort Functions * @{ */ typedef unsigned int uint; /** @brief And empty kernel used to reset CTA issue hardware **/ __global__ void emptyKernel() {} /** @brief Does special binary arithmetic before sorting floats * * Uses floatFlip function to flip bits. * @param[in,out] values Values to be manipulated * @param[in] numValues Number of values to be flipped **/ __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) flipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatFlip<true>(values[index]); } /** @brief Undoes the flips from flipFloats * * Uses floatUnflip function to unflip bits. * @param[in,out] values Values to be manipulated * @param[in] numValues Number of values to be unflipped **/ __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) unflipFloats(uint *values, uint numValues) { uint index = __umul24(blockDim.x*4, blockIdx.x) + threadIdx.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); index += blockDim.x; if (index < numValues) values[index] = floatUnflip<true>(values[index]); } /** @brief Optimization for sorts of WARP_SIZE or fewer elements * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] numElements Number of elements in the sort. 
*/ template <bool flip> __global__ LAUNCH_BOUNDS(WARP_SIZE) void radixSortSingleWarp(uint *keys, uint *values, uint numElements) { volatile __shared__ uint sKeys[WARP_SIZE]; //remove class distinctions volatile __shared__ uint sValues[WARP_SIZE]; volatile __shared__ uint sFlags[WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); sValues[threadIdx.x] = values[threadIdx.x]; __EMUSYNC; // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; uint val_i = sValues[i]; sFlags[threadIdx.x] = 0; uint temp, tempval; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { temp = sKeys[threadIdx.x]; tempval = sValues[threadIdx.x]; sFlags[threadIdx.x] = 1; #ifdef __DEVICE_EMULATION__ } __EMUSYNC; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { #endif sKeys[threadIdx.x + 1] = temp; sValues[threadIdx.x + 1] = tempval; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; sValues[threadIdx.x] = val_i; } __EMUSYNC; // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); values[threadIdx.x] = sValues[threadIdx.x]; } /** @brief Optimization for sorts of WARP_SIZE or fewer elements. Keys-Only version. * * @param[in,out] keys Keys to be sorted * @param[in] numElements Total number of elements to be sorted **/ template <bool flip> __global__ LAUNCH_BOUNDS(WARP_SIZE) void radixSortSingleWarpKeysOnly(uint *keys, uint numElements) { volatile __shared__ uint sKeys[WARP_SIZE]; volatile __shared__ uint sFlags[WARP_SIZE]; sKeys[threadIdx.x] = floatFlip<flip>(keys[threadIdx.x]); __EMUSYNC; // emulation only for(uint i = 1; i < numElements; i++) { uint key_i = sKeys[i]; sFlags[threadIdx.x] = 0; uint temp; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { temp = sKeys[threadIdx.x]; sFlags[threadIdx.x] = 1; #ifdef __DEVICE_EMULATION__ } __EMUSYNC; if( (threadIdx.x < i) && (sKeys[threadIdx.x] > key_i) ) { #endif sKeys[threadIdx.x + 1] = temp; sFlags[threadIdx.x + 1] = 0; } if(sFlags[threadIdx.x] == 1 ) { sKeys[threadIdx.x] = key_i; } __EMUSYNC; // emulation only } keys[threadIdx.x] = floatUnflip<flip>(sKeys[threadIdx.x]); } /** @brief sorts all blocks of data independently in shared memory. * Each thread block (CTA) sorts one block of 4*CTA_SIZE elements * * The radix sort is done in two stages. This stage calls radixSortBlock on each * block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size (fullBlocks) * differently than arrays that are not. "flip" is used to only compile in the * float flip code when float keys are used. "loop" is used when persistent CTAs * are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. 
* * @param[out] keysOut Output of sorted keys * @param[out] valuesOut Output of associated values * @param[in] keysIn Input of unsorted keys in GPU * @param[in] valuesIn Input of associated input values * @param[in] numElements Total number of elements to sort * @param[in] totalBlocks The number of blocks of data to sort */ template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) radixSortBlocks(uint4* keysOut, uint4* valuesOut, uint4* keysIn, uint4* valuesIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key, value; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); value = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; uint *values1 = (uint*)valuesIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; value.x = (idx < numElements) ? values1[idx] : UINT_MAX; value.y = (idx+1 < numElements) ? values1[idx+1] : UINT_MAX; value.z = (idx+2 < numElements) ? values1[idx+2] : UINT_MAX; value.w = UINT_MAX; } } else { key = keysIn[i]; value = valuesIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlock<nbits, startbit>(key, value); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; uint *values1 = (uint*)valuesOut; keys1[idx] = key.x; values1[idx] = value.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; values1[idx + 1] = value.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; values1[idx + 2] = value.z; } } } } else { keysOut[i] = key; valuesOut[i] = value; } if (loop) blockId += gridDim.x; else break; } } /** @brief Computes the number of keys of each radix in each block stores offset. * * Given an array with blocks sorted according to a 4-bit radix group, each * block counts the number of keys that fall into each radix in the group, and * finds the starting offset of each radix in the block. It then writes the radix * counts to the counters array, and the starting offsets to the blockOffsets array. * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size * (fullBlocks) differently than arrays that are not. "loop" is used when persistent * CTAs are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. 
* * @param[in] keys Input keys * @param[out] counters Radix count for each block * @param[out] blockOffsets The offset address for each block * @param[in] numElements Total number of elements * @param[in] totalBlocks Total number of blocks **/ template<uint startbit, bool fullBlocks, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) findRadixOffsets(uint2 *keys, uint *counters, uint *blockOffsets, uint numElements, uint totalBlocks) { extern __shared__ uint sRadix1[]; __shared__ uint sStartPointers[16]; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint2 radix2; uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && ((i + 1) << 1 ) > numElements ) { // handle uint1 rather than uint2 for non-full blocks uint *keys1 = (uint*)keys; uint j = i << 1; radix2.x = (j < numElements) ? keys1[j] : UINT_MAX; j++; radix2.y = (j < numElements) ? keys1[j] : UINT_MAX; } else { radix2 = keys[i]; } sRadix1[2 * threadIdx.x] = (radix2.x >> startbit) & 0xF; sRadix1[2 * threadIdx.x + 1] = (radix2.y >> startbit) & 0xF; // Finds the position where the sRadix1 entries differ and stores start // index for each radix. if(threadIdx.x < 16) { sStartPointers[threadIdx.x] = 0; } __syncthreads(); if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x]] = threadIdx.x; } if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]) { sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE]] = threadIdx.x + SORT_CTA_SIZE; } __syncthreads(); if(threadIdx.x < 16) { blockOffsets[blockId*16 + threadIdx.x] = sStartPointers[threadIdx.x]; } __syncthreads(); // Compute the sizes of each block. if((threadIdx.x > 0) && (sRadix1[threadIdx.x] != sRadix1[threadIdx.x - 1]) ) { sStartPointers[sRadix1[threadIdx.x - 1]] = threadIdx.x - sStartPointers[sRadix1[threadIdx.x - 1]]; } if(sRadix1[threadIdx.x + SORT_CTA_SIZE] != sRadix1[threadIdx.x + SORT_CTA_SIZE - 1] ) { sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]] = threadIdx.x + SORT_CTA_SIZE - sStartPointers[sRadix1[threadIdx.x + SORT_CTA_SIZE - 1]]; } if(threadIdx.x == SORT_CTA_SIZE - 1) { sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]] = 2 * SORT_CTA_SIZE - sStartPointers[sRadix1[2 * SORT_CTA_SIZE - 1]]; } __syncthreads(); if(threadIdx.x < 16) { counters[threadIdx.x * totalBlocks + blockId] = sStartPointers[threadIdx.x]; } if (loop) blockId += gridDim.x; else break; } } /**@brief Reorders data in the global array. * * reorderData shuffles data in the array globally after the radix * offsets have been found. On compute version 1.1 and earlier GPUs, this code depends * on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). * * On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures * that all writes are coalesced using extra work in the kernel. On later * GPUs coalescing rules have been relaxed, so this extra overhead hurts * performance. On these GPUs we set manualCoalesce=false and directly store * the results. * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size * (fullBlocks) differently than arrays that are not. "loop" is used when persistent * CTAs are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. 
Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. * * @param[out] outKeys Output of sorted keys * @param[out] outValues Output of associated values * @param[in] keys Input of unsorted keys in GPU * @param[in] values Input of associated input values * @param[in] blockOffsets The offset address for each block * @param[in] offsets Address of each radix within each block * @param[in] sizes Number of elements in a block * @param[in] numElements Total number of elements * @param[in] totalBlocks Total number of data blocks to process * * @todo Args that are const below should be prototyped as const **/ template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) reorderData(uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[SORT_CTA_SIZE]; __shared__ uint2 sValues2[SORT_CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint *sValues1 = (uint*)sValues2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint *values1 = (uint*)values; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[threadIdx.x << 1] = (j < numElements) ? values1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? keys1[j] : UINT_MAX; sValues1[(threadIdx.x << 1) + 1] = (j < numElements) ? values1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; sValues2[threadIdx.x] = values[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); outValues[globalOffset] = sValues1[threadIdx.x]; } radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]); outValues[globalOffset] = sValues1[threadIdx.x + SORT_CTA_SIZE]; } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. 
// Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); outValues[outOffset] = sValues1[inOffset]; } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } /** @brief Sorts all blocks of data independently in shared memory. * Each thread block (CTA) sorts one block of 4*CTA_SIZE elements * * The radix sort is done in two stages. This stage calls radixSortBlock on each * block independently, sorting on the basis of bits (startbit) -> (startbit + nbits) * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size (fullBlocks) * differently than arrays that are not. "flip" is used to only compile in the * float flip code when float keys are used. "loop" is used when persistent CTAs * are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. * * @param[out] keysOut Output of sorted keys GPU main memory * @param[in] keysIn Input of unsorted keys in GPU main memory * @param[in] numElements Total number of elements to sort * @param[in] totalBlocks Total number of blocks to sort * */ template<uint nbits, uint startbit, bool fullBlocks, bool flip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) radixSortBlocksKeysOnly(uint4* keysOut, uint4* keysIn, uint numElements, uint totalBlocks) { extern __shared__ uint4 sMem[]; uint4 key; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; uint idx = i << 2; // handle non-full last block if array is not multiple of 1024 numElements if (!fullBlocks && idx+3 >= numElements) { if (idx >= numElements) { key = make_uint4(UINT_MAX, UINT_MAX, UINT_MAX, UINT_MAX); } else { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysIn; key.x = (idx < numElements) ? floatFlip<flip>(keys1[idx]) : UINT_MAX; key.y = (idx+1 < numElements) ? floatFlip<flip>(keys1[idx+1]) : UINT_MAX; key.z = (idx+2 < numElements) ? 
floatFlip<flip>(keys1[idx+2]) : UINT_MAX; key.w = UINT_MAX; } } else { key = keysIn[i]; if (flip) { key.x = floatFlip<flip>(key.x); key.y = floatFlip<flip>(key.y); key.z = floatFlip<flip>(key.z); key.w = floatFlip<flip>(key.w); } } __syncthreads(); radixSortBlockKeysOnly<nbits, startbit>(key); // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && idx+3 >= numElements) { if (idx < numElements) { // for non-full block, we handle uint1 values instead of uint4 uint *keys1 = (uint*)keysOut; keys1[idx] = key.x; if (idx + 1 < numElements) { keys1[idx + 1] = key.y; if (idx + 2 < numElements) { keys1[idx + 2] = key.z; } } } } else { keysOut[i] = key; } if (loop) blockId += gridDim.x; else break; } } /** @brief Reorders data in the global array. * * reorderDataKeysOnly shuffles data in the array globally after the radix offsets * have been found. On compute version 1.1 and earlier GPUs, this code depends * on SORT_CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits). * * On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures * that all writes are coalesced using extra work in the kernel. On later * GPUs coalescing rules have been relaxed, so this extra overhead hurts * performance. On these GPUs we set manualCoalesce=false and directly store * the results. * * Template parameters are used to generate efficient code for various special cases * For example, we have to handle arrays that are a multiple of the block size * (fullBlocks) differently than arrays that are not. "loop" is used when persistent * CTAs are used. * * By persistent CTAs we mean that we launch only as many thread blocks as can * be resident in the GPU and no more, rather than launching as many threads as * we have elements. Persistent CTAs loop over blocks of elements until all work * is complete. This can be faster in some cases. In our tests it is faster * for large sorts (and the threshold is higher on compute version 1.1 and earlier * GPUs than it is on compute version 1.2 GPUs. * * @param[out] outKeys Output result of reorderDataKeysOnly() * @param[in] keys Keys to be reordered * @param[in] blockOffsets Start offset for each block * @param[in] offsets Offset of each radix within each block * @param[in] sizes Number of elements in a block * @param[in] numElements Total number of elements * @param[in] totalBlocks Total number of blocks */ template<uint startbit, bool fullBlocks, bool manualCoalesce, bool unflip, bool loop> __global__ void LAUNCH_BOUNDS(SORT_CTA_SIZE) reorderDataKeysOnly(uint *outKeys, uint2 *keys, uint *blockOffsets, uint *offsets, uint *sizes, uint numElements, uint totalBlocks) { __shared__ uint2 sKeys2[SORT_CTA_SIZE]; __shared__ uint sOffsets[16]; __shared__ uint sBlockOffsets[16]; uint *sKeys1 = (uint*)sKeys2; uint blockId = blockIdx.x; while (!loop || blockId < totalBlocks) { uint i = blockId * blockDim.x + threadIdx.x; // handle non-full last block if array is not multiple of 1024 numElements if(!fullBlocks && (((i + 1) << 1) > numElements)) { uint *keys1 = (uint*)keys; uint j = i << 1; sKeys1[threadIdx.x << 1] = (j < numElements) ? keys1[j] : UINT_MAX; j++; sKeys1[(threadIdx.x << 1) + 1] = (j < numElements) ? 
keys1[j] : UINT_MAX; } else { sKeys2[threadIdx.x] = keys[i]; } if (!manualCoalesce) { if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; } __syncthreads(); uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF; uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x]); } radix = (sKeys1[threadIdx.x + SORT_CTA_SIZE] >> startbit) & 0xF; globalOffset = sOffsets[radix] + threadIdx.x + SORT_CTA_SIZE - sBlockOffsets[radix]; if (fullBlocks || globalOffset < numElements) { outKeys[globalOffset] = floatUnflip<unflip>(sKeys1[threadIdx.x + SORT_CTA_SIZE]); } } else { __shared__ uint sSizes[16]; if(threadIdx.x < 16) { sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks + blockId]; sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x]; sSizes[threadIdx.x] = sizes[threadIdx.x * totalBlocks + blockId]; } __syncthreads(); // 1 half-warp is responsible for writing out all values for 1 radix. // Loops if there are more than 16 values to be written out. // All start indices are rounded down to the nearest multiple of 16, and // all end indices are rounded up to the nearest multiple of 16. // Thus it can do extra work if the start and end indices are not multiples of 16 // This is bounded by a factor of 2 (it can do 2X more work at most). const uint halfWarpID = threadIdx.x >> 4; const uint halfWarpOffset = threadIdx.x & 0xF; const uint leadingInvalid = sOffsets[halfWarpID] & 0xF; uint startPos = sOffsets[halfWarpID] & 0xFFFFFFF0; uint endPos = (sOffsets[halfWarpID] + sSizes[halfWarpID]) + 15 - ((sOffsets[halfWarpID] + sSizes[halfWarpID] - 1) & 0xF); uint numIterations = endPos - startPos; uint outOffset = startPos + halfWarpOffset; uint inOffset = sBlockOffsets[halfWarpID] - leadingInvalid + halfWarpOffset; for(uint j = 0; j < numIterations; j += 16, outOffset += 16, inOffset += 16) { if( (outOffset >= sOffsets[halfWarpID]) && (inOffset - sBlockOffsets[halfWarpID] < sSizes[halfWarpID])) { if(blockId < totalBlocks - 1 || outOffset < numElements) { outKeys[outOffset] = floatUnflip<unflip>(sKeys1[inOffset]); } } } } if (loop) { blockId += gridDim.x; __syncthreads(); } else break; } } /** @} */ // end radixsort functions /** @} */ // end cudpp_kernel
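/* Hedged illustration (not part of CUDPP): the kernel comments above repeatedly
   describe "persistent CTAs" -- launching only as many thread blocks as the GPU
   can keep resident and having each block loop over chunks of the input until
   all work is done. The minimal self-contained kernel below shows just that
   looping pattern in isolation; all names and the launch configuration are
   mine and purely illustrative, not the ones CUDPP computes. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void persistentIncrement(unsigned int *data, unsigned int numElements,
                                    unsigned int totalBlocks)
{
    unsigned int blockId = blockIdx.x;
    // Each resident block keeps grabbing whole blocks of work until none remain,
    // mirroring the "while (!loop || blockId < totalBlocks)" loops above.
    while (blockId < totalBlocks) {
        unsigned int i = blockId * blockDim.x + threadIdx.x;
        if (i < numElements) {
            data[i] += 1;
        }
        blockId += gridDim.x;   // stride by the number of launched blocks
    }
}

int main()
{
    const unsigned int numElements = 1 << 20;
    const unsigned int ctaSize     = 256;
    const unsigned int totalBlocks = (numElements + ctaSize - 1) / ctaSize;

    unsigned int *d_data;
    cudaMalloc(&d_data, numElements * sizeof(unsigned int));
    cudaMemset(d_data, 0, numElements * sizeof(unsigned int));

    // Launch far fewer blocks than totalBlocks; the loop inside the kernel
    // covers the rest. A real implementation would size this from occupancy.
    persistentIncrement<<<64, ctaSize>>>(d_data, numElements, totalBlocks);
    cudaDeviceSynchronize();

    unsigned int h0 = 0;
    cudaMemcpy(&h0, d_data, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    std::printf("data[0] = %u (expected 1)\n", h0);

    cudaFree(d_data);
    return 0;
}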
e91ff54b763aaf6823552a562aca0012109c267a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o fourpass fourpass.cu ./fourpass Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. *****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password1[] = "RA12"; char plain_password2[] = "TH23"; char plain_password3[] = "MA37"; char plain_password4[] = "AR94"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",plain_password1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",plain_password2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char i1,i2,i3,i4; char password[7]; password[6] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ for(i3='0'; i3<='9'; i3++){ for(i4='0'; i4<='9'; i4++){ password[2] = i1; password[3] = i2; password[4] = i3; password[5] = i4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); } } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
e91ff54b763aaf6823552a562aca0012109c267a.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o fourpass fourpass.cu ./fourpass Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. *****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password1[] = "RA12"; char plain_password2[] = "TH23"; char plain_password3[] = "MA37"; char plain_password4[] = "AR94"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*a == *p1) { if(*a == '\0') { printf("Password: %s\n",plain_password1); break; } a++; p1++; } while(*b == *p2) { if(*b == '\0') { printf("Password: %s\n",plain_password2); break; } b++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char i1,i2,i3,i4; char password[7]; password[6] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ for(i3='0'; i3<='9'; i3++){ for(i4='0'; i4<='9'; i4++){ password[2] = i1; password[3] = i2; password[4] = i3; password[5] = i4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); } } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); kernel <<<26,26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
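/* Hedged sketch (not from the original program): the comment block above notes
   the brute-force search, and the kernel parallelises only the two leading
   letters (26x26 blocks/threads) while every thread serially loops over all
   10'000 numeric suffixes. The standalone kernel below shows one way to also
   spread the numeric space across the threads of a block; the target "KB2712"
   and all names are made up for illustration only. */
#include <cstdio>
#include <cuda_runtime.h>

__device__ bool matches(const char *a, const char *b)
{
    for (int k = 0; k < 6; ++k) {
        if (a[k] != b[k]) return false;
    }
    return true;
}

__global__ void crack(int *found)
{
    const char target[7] = "KB2712";           // hypothetical password
    char candidate[7];
    candidate[0] = 'A' + blockIdx.x;           // first letter, 26 blocks in x
    candidate[1] = 'A' + blockIdx.y;           // second letter, 26 blocks in y
    candidate[6] = '\0';

    // Each thread of the block handles a strided share of the 0000..9999 range.
    for (int n = threadIdx.x; n < 10000; n += blockDim.x) {
        candidate[2] = '0' + (n / 1000) % 10;
        candidate[3] = '0' + (n / 100) % 10;
        candidate[4] = '0' + (n / 10) % 10;
        candidate[5] = '0' + n % 10;
        if (matches(candidate, target)) {
            *found = 1;
            printf("Password: %s\n", candidate);
        }
    }
}

int main()
{
    int *d_found;
    cudaMalloc(&d_found, sizeof(int));
    cudaMemset(d_found, 0, sizeof(int));

    dim3 grid(26, 26);
    crack<<<grid, 256>>>(d_found);
    cudaDeviceSynchronize();

    cudaFree(d_found);
    return 0;
}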
57fa9afe985e10f2d5fff11ad963a2228b234b75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" #include "FractalMath.h" #include "JuliaMath.h" #include "MandelbrotMath.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __device__ void fractal(uchar4* ptrDevPixels, uint width, uint height, DomaineMath domaineMath, uint n, FractalMath* fractalMath); __global__ void julia(uchar4* ptrDevPixels, uint width, uint height, DomaineMath domaineMath, uint n); __global__ void mandelbrot(uchar4* ptrDevPixels, uint width, uint height, DomaineMath domaineMath, uint n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __device__ void fractal(uchar4* ptrDevPixels, int w, int h, DomaineMath mathDomain, int n, FractalMath* fractalMath) { const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; // math double x; double y; // screen int pixelI; int pixelJ; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) mathDomain.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) fractalMath->colorXY(&color, x, y, n); // update color ptrDevPixels[s] = color; s += NB_THREAD; } } __global__ void julia(uchar4* ptrDevPixels, int width, int height, DomaineMath domaineMath, int n) { float c1 = -0.12; float c2 = 0.85; FractalMath* fractalMath = new JuliaMath(c1, c2); fractal(ptrDevPixels, width, height, domaineMath, n, fractalMath); delete fractalMath; } __global__ void mandelbrot(uchar4* ptrDevPixels, int width, int height, DomaineMath domaineMath, int n) { FractalMath* fractalMath = new MandelbrotMath(); fractal(ptrDevPixels, width, height, domaineMath, n, fractalMath); delete fractalMath; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
57fa9afe985e10f2d5fff11ad963a2228b234b75.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "DomaineMath_GPU.h" #include "FractalMath.h" #include "JuliaMath.h" #include "MandelbrotMath.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __device__ void fractal(uchar4* ptrDevPixels, uint width, uint height, DomaineMath domaineMath, uint n, FractalMath* fractalMath); __global__ void julia(uchar4* ptrDevPixels, uint width, uint height, DomaineMath domaineMath, uint n); __global__ void mandelbrot(uchar4* ptrDevPixels, uint width, uint height, DomaineMath domaineMath, uint n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __device__ void fractal(uchar4* ptrDevPixels, int w, int h, DomaineMath mathDomain, int n, FractalMath* fractalMath) { const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; // math double x; double y; // screen int pixelI; int pixelJ; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) mathDomain.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) fractalMath->colorXY(&color, x, y, n); // update color ptrDevPixels[s] = color; s += NB_THREAD; } } __global__ void julia(uchar4* ptrDevPixels, int width, int height, DomaineMath domaineMath, int n) { float c1 = -0.12; float c2 = 0.85; FractalMath* fractalMath = new JuliaMath(c1, c2); fractal(ptrDevPixels, width, height, domaineMath, n, fractalMath); delete fractalMath; } __global__ void mandelbrot(uchar4* ptrDevPixels, int width, int height, DomaineMath domaineMath, int n) { FractalMath* fractalMath = new MandelbrotMath(); fractal(ptrDevPixels, width, height, domaineMath, n, fractalMath); delete fractalMath; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
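/* Hedged sketch: the fractal() helper above walks a flattened pixel index s
   with stride nbThread, turning s into (i,j) screen coordinates and then into
   (x,y) math coordinates before colouring. The self-contained kernel below
   reproduces just that index arithmetic with plain code instead of the
   project's Indice2D/IndiceTools/DomaineMath helpers; the linear (i,j)->(x,y)
   mapping and the placeholder colouring are assumptions for illustration. */
#include <cuda_runtime.h>

__global__ void shadePixels(uchar4 *pixels, int w, int h,
                            double x0, double y0, double x1, double y1)
{
    const int tid      = blockIdx.x * blockDim.x + threadIdx.x;
    const int nbThread = gridDim.x * blockDim.x;
    const int wh       = w * h;

    for (int s = tid; s < wh; s += nbThread) {
        const int i = s / w;                              // row    (like toIJ)
        const int j = s % w;                              // column
        const double x = x0 + (x1 - x0) * j / (w - 1);    // (i,j) -> (x,y)
        const double y = y0 + (y1 - y0) * i / (h - 1);

        // Placeholder colouring: encode the normalised coordinates.
        const unsigned char r = static_cast<unsigned char>(255.0 * (x - x0) / (x1 - x0));
        const unsigned char g = static_cast<unsigned char>(255.0 * (y - y0) / (y1 - y0));
        pixels[s] = make_uchar4(r, g, 0, 255);
    }
}

int main()
{
    const int w = 640, h = 480;
    uchar4 *dPixels;
    cudaMalloc(&dPixels, w * h * sizeof(uchar4));

    // Fewer threads than pixels: the stride loop in the kernel covers the rest.
    shadePixels<<<64, 256>>>(dPixels, w, h, -2.0, -1.5, 1.0, 1.5);
    cudaDeviceSynchronize();

    cudaFree(dPixels);
    return 0;
}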
7ab7f0f0b30004ce2207cbd3e12c6a2cde922827.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const bool preforward_flag) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_, preforward_flag); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } // Clear scratch memory to prevent interfering with backward (see #6202). 
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const bool prebackward_flag) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
7ab7f0f0b30004ce2207cbd3e12c6a2cde922827.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const bool preforward_flag) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_, preforward_flag); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } // Clear scratch memory to prevent interfering with backward (see #6202). 
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const bool prebackward_flag) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
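/* Hedged sketch: a host-side reference for what SoftmaxLossForwardGPU above
   computes per spatial position -- loss = -log(max(p[label], FLT_MIN)) with an
   optional ignore_label -- followed by division by the number of valid
   positions (the VALID normalisation path). Something like this can be used to
   unit-test a GPU port; the (num x channels x spatial) layout mirrors the
   kernel's indexing, everything else here is illustrative, not Caffe code. */
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdio>
#include <vector>

float softmaxLossReference(const std::vector<float> &prob,   // num * channels * spatial
                           const std::vector<int> &label,    // num * spatial
                           int num, int channels, int spatial,
                           bool hasIgnoreLabel, int ignoreLabel)
{
    const int dim = channels * spatial;
    double loss = 0.0;
    int validCount = 0;
    for (int n = 0; n < num; ++n) {
        for (int s = 0; s < spatial; ++s) {
            const int lab = label[n * spatial + s];
            if (hasIgnoreLabel && lab == ignoreLabel) continue;
            const float p = std::max(prob[n * dim + lab * spatial + s], FLT_MIN);
            loss -= std::log(p);
            ++validCount;
        }
    }
    return validCount > 0 ? static_cast<float>(loss / validCount) : 0.0f;
}

int main()
{
    // 1 image, 3 classes, 2 spatial positions; labels 0 and 2, none ignored.
    std::vector<float> prob  = {0.7f, 0.2f, 0.2f, 0.5f, 0.1f, 0.3f};
    std::vector<int>   label = {0, 2};
    std::printf("reference loss = %f\n",
                softmaxLossReference(prob, label, 1, 3, 2, false, -1));
    return 0;
}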
622eba1321d2fd5a0e03d828bbb5edcdaed85713.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct CopyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType a, CudaType& out) { out = a; } }; } // namespace void CudaDevice::Copy(const Array& a, const Array& out) { CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CopyImpl<T>{}, a, out); }); } namespace { template <typename InT, typename OutT> struct AsTypeImpl { using InCudaType = cuda_internal::DataType<InT>; using OutCudaType = cuda_internal::DataType<OutT>; __device__ void operator()(int64_t /*i*/, InCudaType a, OutCudaType& out) { out = static_cast<OutCudaType>(a); } }; } // namespace void CudaDevice::AsType(const Array& a, const Array& out) { CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{index()}; auto do_astype = [&](auto in_pt, auto out_pt) { using InT = typename decltype(in_pt)::type; using OutT = typename decltype(out_pt)::type; Elementwise<const InT, OutT>(AsTypeImpl<InT, OutT>{}, a, out); }; VisitDtype(out.dtype(), [&](auto out_pt) { VisitDtype(a.dtype(), do_astype, out_pt); }); } } // namespace cuda } // namespace chainerx
622eba1321d2fd5a0e03d828bbb5edcdaed85713.cu
#include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct CopyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType a, CudaType& out) { out = a; } }; } // namespace void CudaDevice::Copy(const Array& a, const Array& out) { CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CopyImpl<T>{}, a, out); }); } namespace { template <typename InT, typename OutT> struct AsTypeImpl { using InCudaType = cuda_internal::DataType<InT>; using OutCudaType = cuda_internal::DataType<OutT>; __device__ void operator()(int64_t /*i*/, InCudaType a, OutCudaType& out) { out = static_cast<OutCudaType>(a); } }; } // namespace void CudaDevice::AsType(const Array& a, const Array& out) { CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{index()}; auto do_astype = [&](auto in_pt, auto out_pt) { using InT = typename decltype(in_pt)::type; using OutT = typename decltype(out_pt)::type; Elementwise<const InT, OutT>(AsTypeImpl<InT, OutT>{}, a, out); }; VisitDtype(out.dtype(), [&](auto out_pt) { VisitDtype(a.dtype(), do_astype, out_pt); }); } } // namespace cuda } // namespace chainerx
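/* Hedged sketch: AsType above boils down to an element-wise static_cast driven
   by chainerx's Elementwise helper and dtype dispatch. The stripped-down
   kernel below shows the same per-element cast for one fixed pair of types
   (float -> int32) without any of that machinery; it is an illustration of
   the idea, not the chainerx implementation. */
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

template <typename InT, typename OutT>
__global__ void castKernel(const InT *in, OutT *out, int64_t n)
{
    int64_t i = blockIdx.x * static_cast<int64_t>(blockDim.x) + threadIdx.x;
    if (i < n) {
        out[i] = static_cast<OutT>(in[i]);   // same body as AsTypeImpl::operator()
    }
}

int main()
{
    const int64_t n = 4;
    const float hostIn[n] = {1.5f, -2.25f, 3.0f, 42.9f};
    int32_t hostOut[n];

    float *dIn;
    int32_t *dOut;
    cudaMalloc(&dIn, n * sizeof(float));
    cudaMalloc(&dOut, n * sizeof(int32_t));
    cudaMemcpy(dIn, hostIn, n * sizeof(float), cudaMemcpyHostToDevice);

    castKernel<float, int32_t><<<1, 128>>>(dIn, dOut, n);
    cudaMemcpy(hostOut, dOut, n * sizeof(int32_t), cudaMemcpyDeviceToHost);

    for (int64_t i = 0; i < n; ++i) std::printf("%d ", hostOut[i]);
    std::printf("\n");   // expected: 1 -2 3 42

    cudaFree(dIn);
    cudaFree(dOut);
    return 0;
}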
ad494a6a42cb16aa5ea0b9388413034d7ae94816.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * RecursionStack.cpp
 *
 *  Created on: 16-Oct-2015
 *      Author: debarshi
 */

#include "RecursionStack.h"

namespace BK_GPU {

// Allocates device storage for `size` stack entries and starts with an empty
// stack (top = 0). The stream argument is accepted but not used here.
RecursionStack::RecursionStack(int size, hipStream_t &stream)
{
	top = 0;
	CudaError(hipMalloc(&elements, sizeof(int) * size));
}

// Releases the device storage.
RecursionStack::~RecursionStack()
{
	CudaError(hipFree(this->elements));
}

} /* namespace BK_GPU */
ad494a6a42cb16aa5ea0b9388413034d7ae94816.cu
/*
 * RecursionStack.cpp
 *
 *  Created on: 16-Oct-2015
 *      Author: debarshi
 */

#include "RecursionStack.h"

namespace BK_GPU {

// Allocates device storage for `size` stack entries and starts with an empty
// stack (top = 0). The stream argument is accepted but not used here.
RecursionStack::RecursionStack(int size, cudaStream_t &stream)
{
	top = 0;
	CudaError(cudaMalloc(&elements, sizeof(int) * size));
}

// Releases the device storage.
RecursionStack::~RecursionStack()
{
	CudaError(cudaFree(this->elements));
}

} /* namespace BK_GPU */
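/* Hedged sketch: RecursionStack above is a thin RAII wrapper -- allocate the
   device array in the constructor, free it in the destructor. The generic
   DeviceBuffer below shows the same pattern in isolation; the class name and
   the simple error macro are mine, not part of the BK_GPU project. */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err = (call);                                          \
        if (err != cudaSuccess) {                                          \
            std::fprintf(stderr, "%s\n", cudaGetErrorString(err));         \
            std::exit(EXIT_FAILURE);                                       \
        }                                                                  \
    } while (0)

template <typename T>
class DeviceBuffer {
 public:
    explicit DeviceBuffer(int size) : ptr_(nullptr) {
        CHECK_CUDA(cudaMalloc(&ptr_, sizeof(T) * size));   // like the constructor above
    }
    ~DeviceBuffer() {
        cudaFree(ptr_);                                    // like the destructor above
    }
    T *get() const { return ptr_; }

    DeviceBuffer(const DeviceBuffer &) = delete;           // owning pointer: non-copyable
    DeviceBuffer &operator=(const DeviceBuffer &) = delete;

 private:
    T *ptr_;
};

int main()
{
    DeviceBuffer<int> stack(1024);   // freed automatically when it goes out of scope
    CHECK_CUDA(cudaMemset(stack.get(), 0, 1024 * sizeof(int)));
    return 0;
}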
17ab676407a15a082a506f723dd7a56dbadee410.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2015, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <Array.hpp> #include <common/jit/NaryNode.hpp> #include <err_cuda.hpp> #include <kernel/select.hpp> #include <scalar.hpp> #include <select.hpp> using common::NaryNode; using common::Node_ptr; namespace cuda { template<typename T> void select(Array<T> &out, const Array<char> &cond, const Array<T> &a, const Array<T> &b) { kernel::select<T>(out, cond, a, b, out.ndims()); } template<typename T, bool flip> void select_scalar(Array<T> &out, const Array<char> &cond, const Array<T> &a, const double &b) { kernel::select_scalar<T, flip>(out, cond, a, b, out.ndims()); } template<typename T> Array<T> createSelectNode(const Array<char> &cond, const Array<T> &a, const Array<T> &b, const af::dim4 &odims) { auto cond_node = cond.getNode(); auto a_node = a.getNode(); auto b_node = b.getNode(); int height = ::max(a_node->getHeight(), b_node->getHeight()); height = ::max(height, cond_node->getHeight()) + 1; NaryNode *node = new NaryNode(getFullName<T>(), shortname<T>(true), "__select", 3, {{cond_node, a_node, b_node}}, (int)af_select_t, height); Array<T> out = createNodeArray<T>(odims, Node_ptr(node)); return out; } template<typename T, bool flip> Array<T> createSelectNode(const Array<char> &cond, const Array<T> &a, const double &b_val, const af::dim4 &odims) { auto cond_node = cond.getNode(); auto a_node = a.getNode(); Array<T> b = createScalarNode<T>(odims, scalar<T>(b_val)); auto b_node = b.getNode(); int height = ::max(a_node->getHeight(), b_node->getHeight()); height = ::max(height, cond_node->getHeight()) + 1; NaryNode *node = new NaryNode( getFullName<T>(), shortname<T>(true), flip ? "__not_select" : "__select", 3, {{cond_node, a_node, b_node}}, (int)(flip ? af_not_select_t : af_select_t), height); Array<T> out = createNodeArray<T>(odims, Node_ptr(node)); return out; } #define INSTANTIATE(T) \ template Array<T> createSelectNode<T>( \ const Array<char> &cond, const Array<T> &a, const Array<T> &b, \ const af::dim4 &odims); \ template Array<T> createSelectNode<T, true>( \ const Array<char> &cond, const Array<T> &a, const double &b_val, \ const af::dim4 &odims); \ template Array<T> createSelectNode<T, false>( \ const Array<char> &cond, const Array<T> &a, const double &b_val, \ const af::dim4 &odims); \ template void select<T>(Array<T> & out, const Array<char> &cond, \ const Array<T> &a, const Array<T> &b); \ template void select_scalar<T, true>(Array<T> & out, \ const Array<char> &cond, \ const Array<T> &a, const double &b); \ template void select_scalar<T, false>(Array<T> & out, \ const Array<char> &cond, \ const Array<T> &a, const double &b) INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(cfloat); INSTANTIATE(cdouble); INSTANTIATE(int); INSTANTIATE(uint); INSTANTIATE(intl); INSTANTIATE(uintl); INSTANTIATE(char); INSTANTIATE(uchar); INSTANTIATE(short); INSTANTIATE(ushort); } // namespace cuda
17ab676407a15a082a506f723dd7a56dbadee410.cu
/******************************************************* * Copyright (c) 2015, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <Array.hpp> #include <common/jit/NaryNode.hpp> #include <err_cuda.hpp> #include <kernel/select.hpp> #include <scalar.hpp> #include <select.hpp> using common::NaryNode; using common::Node_ptr; namespace cuda { template<typename T> void select(Array<T> &out, const Array<char> &cond, const Array<T> &a, const Array<T> &b) { kernel::select<T>(out, cond, a, b, out.ndims()); } template<typename T, bool flip> void select_scalar(Array<T> &out, const Array<char> &cond, const Array<T> &a, const double &b) { kernel::select_scalar<T, flip>(out, cond, a, b, out.ndims()); } template<typename T> Array<T> createSelectNode(const Array<char> &cond, const Array<T> &a, const Array<T> &b, const af::dim4 &odims) { auto cond_node = cond.getNode(); auto a_node = a.getNode(); auto b_node = b.getNode(); int height = std::max(a_node->getHeight(), b_node->getHeight()); height = std::max(height, cond_node->getHeight()) + 1; NaryNode *node = new NaryNode(getFullName<T>(), shortname<T>(true), "__select", 3, {{cond_node, a_node, b_node}}, (int)af_select_t, height); Array<T> out = createNodeArray<T>(odims, Node_ptr(node)); return out; } template<typename T, bool flip> Array<T> createSelectNode(const Array<char> &cond, const Array<T> &a, const double &b_val, const af::dim4 &odims) { auto cond_node = cond.getNode(); auto a_node = a.getNode(); Array<T> b = createScalarNode<T>(odims, scalar<T>(b_val)); auto b_node = b.getNode(); int height = std::max(a_node->getHeight(), b_node->getHeight()); height = std::max(height, cond_node->getHeight()) + 1; NaryNode *node = new NaryNode( getFullName<T>(), shortname<T>(true), flip ? "__not_select" : "__select", 3, {{cond_node, a_node, b_node}}, (int)(flip ? af_not_select_t : af_select_t), height); Array<T> out = createNodeArray<T>(odims, Node_ptr(node)); return out; } #define INSTANTIATE(T) \ template Array<T> createSelectNode<T>( \ const Array<char> &cond, const Array<T> &a, const Array<T> &b, \ const af::dim4 &odims); \ template Array<T> createSelectNode<T, true>( \ const Array<char> &cond, const Array<T> &a, const double &b_val, \ const af::dim4 &odims); \ template Array<T> createSelectNode<T, false>( \ const Array<char> &cond, const Array<T> &a, const double &b_val, \ const af::dim4 &odims); \ template void select<T>(Array<T> & out, const Array<char> &cond, \ const Array<T> &a, const Array<T> &b); \ template void select_scalar<T, true>(Array<T> & out, \ const Array<char> &cond, \ const Array<T> &a, const double &b); \ template void select_scalar<T, false>(Array<T> & out, \ const Array<char> &cond, \ const Array<T> &a, const double &b) INSTANTIATE(float); INSTANTIATE(double); INSTANTIATE(cfloat); INSTANTIATE(cdouble); INSTANTIATE(int); INSTANTIATE(uint); INSTANTIATE(intl); INSTANTIATE(uintl); INSTANTIATE(char); INSTANTIATE(uchar); INSTANTIATE(short); INSTANTIATE(ushort); } // namespace cuda
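/* Hedged sketch: independent of ArrayFire's JIT plumbing (NaryNode heights,
   createSelectNode, dtype instantiation), the element-wise semantics being
   encoded is simply out[i] = cond[i] ? a[i] : b[i]. The tiny kernel below
   spells that out for float data; it is an illustration of the operation,
   not ArrayFire's kernel::select. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void selectKernel(float *out, const char *cond,
                             const float *a, const float *b, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = cond[i] ? a[i] : b[i];
    }
}

int main()
{
    const int n = 4;
    const float hA[n] = {1.f, 2.f, 3.f, 4.f};
    const float hB[n] = {10.f, 20.f, 30.f, 40.f};
    const char  hC[n] = {1, 0, 0, 1};
    float hOut[n];

    float *dA, *dB, *dOut;
    char *dC;
    cudaMalloc(&dA, n * sizeof(float));
    cudaMalloc(&dB, n * sizeof(float));
    cudaMalloc(&dOut, n * sizeof(float));
    cudaMalloc(&dC, n * sizeof(char));
    cudaMemcpy(dA, hA, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dC, hC, n * sizeof(char), cudaMemcpyHostToDevice);

    selectKernel<<<1, 64>>>(dOut, dC, dA, dB, n);
    cudaMemcpy(hOut, dOut, n * sizeof(float), cudaMemcpyDeviceToHost);

    std::printf("%g %g %g %g\n", hOut[0], hOut[1], hOut[2], hOut[3]);  // 1 20 30 4

    cudaFree(dA); cudaFree(dB); cudaFree(dC); cudaFree(dOut);
    return 0;
}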
f66a09a14b1c1291d5805e50cd2ed41d48062919.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*#include <vector>*/ /*#include "caffe/filler.hpp"*/ /*#include "caffe/layers/center_loss_layer.hpp"*/ /*#include "caffe/util/math_functions.hpp"*/ /*namespace caffe {*/ /*template <typename Dtype>*/ /*__global__ void Compute_distance_cy_data_gpu(int nthreads, const int K, const Dtype* bottom,*/ /*const Dtype* label, const Dtype* center, Dtype* distance, Dtype* cy) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*int m = index / K;*/ /*int k = index % K;*/ /*const int label_value = static_cast<int>(label[m]);*/ /*// distance(i) = x(i) - c_{y(i)}*/ /*distance[index] = bottom[index] - center[label_value * K + k];*/ /*// c_y(i) = c_{y(i)}*/ /*cy[index] = center[label_value * K + k];*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*__global__ void Compute_lij_data_gpu(int nthreads, const int M, const Dtype* label,*/ /*Dtype* lij) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*int i = index / M;*/ /*int j = index % M;*/ /*const int label_value_i = static_cast<int>(label[i]);*/ /*const int label_value_j = static_cast<int>(label[j]);*/ /*if (label_value_i == label_value_j) {*/ /*lij[index] = 1;*/ /*}*/ /*else {*/ /*lij[index] = -1;*/ /*}*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*__global__ void Compute_mat_data_gpu(int nthreads, const Dtype* pa,*/ /*Dtype* mat) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*if (pa[index] > 0) {*/ /*mat[index] = 1;*/ /*}*/ /*else {*/ /*mat[index] = 0;*/ /*}*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, */ /*const Dtype* label, const Dtype* distance, Dtype* variation_sum, */ /*Dtype* center_diff) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*int count = 0;*/ /*for (int m = 0; m < M; m++) {*/ /*const int label_value = static_cast<int>(label[m]);*/ /*if (label_value == index) {*/ /*count++;*/ /*for (int k = 0; k < K; k++) {*/ /*variation_sum[index * K + k] -= distance[m * K + k];*/ /*}*/ /*}*/ /*}*/ /*for (int k = 0; k < K; k++) {*/ /*center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.);*/ /*}*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,*/ /*const vector<Blob<Dtype>*>& top) {*/ /*int nthreads = M_ * K_;*/ /*Blob<Dtype> cy;*/ /*cy.ReshapeLike(*bottom[0]);*/ /*Compute_distance_cy_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(),*/ /*this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data(), cy.mutable_gpu_data());*/ /*nthreads = M_ * M_; */ /*Compute_lij_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, bottom[1]->gpu_data(),*/ /*lij_.mutable_gpu_data());*/ /*Blob<Dtype> xi, cyj;*/ /*xi.ReshapeLike(distance_mat_);*/ /*cyj.ReshapeLike(distance_mat_);*/ /*// cyj = [cy; cy; ...; cy]*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, M_ * K_, 1, 1, Ic_.gpu_data(), cy.gpu_data(), 0., cyj.mutable_gpu_data());*/ /*// xi = [x0; x0; ...; x0; x1; x1; ...; x1; x(M - 1); x(M - 1); ...; x(M - 1)]*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, M_ * K_, K_, 1, bottom[0]->gpu_data(), If_.gpu_data(), 0., xi.mutable_gpu_data());*/ /*// xi - cyj*/ /*caffe_gpu_sub(distance_mat_.count(), xi.gpu_data(), cyj.gpu_data(), distance_mat_.mutable_gpu_data());*/ /*Blob<Dtype> distance_square;*/ /*distance_square.ReshapeLike(distance_mat_);*/ /*// (xi - cyj) .* (xi - cyj)*/ 
/*caffe_gpu_powx(distance_mat_.count(), distance_mat_.gpu_data(), Dtype(2), distance_square.mutable_gpu_data());*/ /*Blob<Dtype> pa;*/ /*pa.ReshapeLike(mat_);*/ /*// (xi - cyj)T(xi - cyj)*/ /*caffe_gpu_gemv<Dtype>(CblasNoTrans, M_ * M_, K_, 1., distance_square.gpu_data(), Im_.gpu_data(), 0., distance_norm_mat_.mutable_gpu_data());*/ /*// ||xi - cyj||*/ /*caffe_gpu_powx(mat_.count(), distance_norm_mat_.gpu_data(), Dtype(0.5), distance_norm_mat_.mutable_gpu_data());*/ /*// ||xi - cyj|| - 1*/ /*caffe_gpu_sub(mat_.count(), distance_norm_mat_.gpu_data(), Ia_.gpu_data(), pa.mutable_gpu_data());*/ /*// lij(||xi - cyj|| - 1)*/ /*caffe_gpu_mul(mat_.count(), lij_.gpu_data(), pa.gpu_data(), pa.mutable_gpu_data());*/ /*// lij(||xi - cyj|| - 1) + alpha*/ /*caffe_gpu_axpy(mat_.count(), margin_, Ia_.gpu_data(), pa.mutable_gpu_data());*/ /*Compute_mat_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, pa.gpu_data(),*/ /*mat_.mutable_gpu_data());*/ /*Dtype dot;*/ /*// caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot);*/ /*caffe_gpu_dot(M_ * M_, mat_.gpu_data(), pa.gpu_data(), &dot);*/ /*Dtype loss = dot / M_ / Dtype(2);*/ /*top[0]->mutable_gpu_data()[0] = loss;*/ /*}*/ /*template <typename Dtype>*/ /*void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,*/ /*const vector<bool>& propagate_down,*/ /*const vector<Blob<Dtype>*>& bottom) {*/ /*int nthreads = N_;*/ /*caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data());*/ /*Compute_center_diff_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), */ /*variation_sum_.mutable_cpu_data(), this->blobs_[0]->mutable_gpu_diff());*/ /*if (propagate_down[0]) {*/ /*// caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, */ /*// distance_.gpu_data(), bottom[0]->mutable_gpu_diff());*/ /*Blob<Dtype> scale;*/ /*scale.ReshapeLike(mat_);*/ /*// scale: lij / ||xi - cyj||*/ /*caffe_gpu_div(mat_.count(), lij_.gpu_data(), distance_norm_mat_.gpu_data(), scale.mutable_gpu_data());*/ /*// caffe_copy(mat_.count(), lij_.cpu_data(), scale.mutable_cpu_data());*/ /*// Iij * lij / ||xi - cyj||*/ /*caffe_gpu_mul(mat_.count(), mat_.gpu_data(), scale.gpu_data(), scale.mutable_gpu_data());*/ /*Blob<Dtype> matI;*/ /*matI.ReshapeLike(distance_mat_);*/ /*// [mat_ * scale, mat_ * scale, ..., mat_ * scale]*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * M_, K_, 1, 1, scale.gpu_data(), Im_.gpu_data(), 0., matI.mutable_gpu_data());*/ /*// (xi - cyj) .* [mat_ * scale, mat_ * scale, ..., mat_ * scale]*/ /*caffe_gpu_mul(distance_mat_.count(), distance_mat_.gpu_data(), matI.gpu_data(), matI.mutable_gpu_data());*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, M_ * M_, 1, Ib_.gpu_data(), matI.gpu_data(), 0., bottom[0]->mutable_gpu_diff());*/ /*caffe_gpu_scal(M_ * K_, top[0]->gpu_diff()[0] / (2 * M_), bottom[0]->mutable_gpu_diff());*/ /*}*/ /*if (propagate_down[1]) {*/ /*LOG(FATAL) << this->type()*/ /*<< " Layer cannot backpropagate to label inputs.";*/ /*}*/ /*}*/ /*INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer);*/ /*} // namespace caffe*/ #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/center_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include <iostream> namespace caffe { template <typename Dtype> __global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom, const Dtype* label, const Dtype* center, Dtype* distance) { CUDA_KERNEL_LOOP(index, nthreads) 
{ int m = index / K; int k = index % K; const int label_value = static_cast<int>(label[m]); // distance(i) = x(i) - c_{y(i)} distance[index] = bottom[index] - center[label_value * K + k]; } } template <typename Dtype> __global__ void Compute_lij_data_gpu(int nthreads, const int N, const Dtype* label, Dtype* lij) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index / N; int j = index % N; const int label_value = static_cast<int>(label[i]); if (label_value == j) { lij[index] = 1; } else { lij[index] = -1; } } } template <typename Dtype> __global__ void Compute_distance_mat_data_gpu(int nthreads, const int N, const int K, const Dtype* bottom, const Dtype* center, Dtype* distance_mat) { CUDA_KERNEL_LOOP(index, nthreads) { int m = index / K / N; int n = index / K % N; int k = index % K; distance_mat[index] = bottom[m * K + k] - center[n * K + k]; } } template <typename Dtype> __global__ void Compute_mat_data_gpu(int nthreads, const Dtype* pa, Dtype* mat) { CUDA_KERNEL_LOOP(index, nthreads) { if (pa[index] > 0) { mat[index] = 1; } else { mat[index] = 0; } } } template <typename Dtype> __global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, const Dtype* label, const Dtype* distance, Dtype* variation_sum, Dtype* center_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int count = 0; for (int m = 0; m < M; m++) { const int label_value = static_cast<int>(label[m]); if (label_value == index) { count++; for (int k = 0; k < K; k++) { variation_sum[index * K + k] -= distance[m * K + k]; } } } for (int k = 0; k < K; k++) { center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.); } } } template <typename Dtype> void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int nthreads = M_ * K_; hipLaunchKernelGGL(( Compute_distance_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(), this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data()); nthreads = M_ * N_; hipLaunchKernelGGL(( Compute_lij_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, bottom[1]->gpu_data(), lij_.mutable_gpu_data()); nthreads = M_ * N_ * K_; hipLaunchKernelGGL(( Compute_distance_mat_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, bottom[0]->gpu_data(), this->blobs_[0]->gpu_data(), distance_mat_.mutable_gpu_data()); Blob<Dtype> distance_square; distance_square.ReshapeLike(distance_mat_); // (xi - cj) .* (xi - cj) caffe_gpu_powx(distance_mat_.count(), distance_mat_.gpu_data(), Dtype(2), distance_square.mutable_gpu_data()); Blob<Dtype> pa; pa.ReshapeLike(mat_); // (xi - cj)T(xi - cj) caffe_gpu_gemv<Dtype>(CblasNoTrans, M_ * N_, K_, 1., distance_square.gpu_data(), Im_.gpu_data(), 0., distance_norm_mat_.mutable_gpu_data()); // ||xi - cj|| caffe_gpu_powx(mat_.count(), distance_norm_mat_.gpu_data(), Dtype(0.5), distance_norm_mat_.mutable_gpu_data()); // ||xi - cj|| - 1 caffe_gpu_sub(mat_.count(), distance_norm_mat_.gpu_data(), Ia_.gpu_data(), pa.mutable_gpu_data()); // lij(||xi - cj|| - 1) caffe_gpu_mul(mat_.count(), lij_.gpu_data(), pa.gpu_data(), pa.mutable_gpu_data()); // lij(||xi - cj|| - 1) + alpha caffe_gpu_axpy(mat_.count(), margin_, Ia_.gpu_data(), pa.mutable_gpu_data()); nthreads = M_ * N_; hipLaunchKernelGGL(( Compute_mat_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, pa.gpu_data(), 
mat_.mutable_gpu_data()); Dtype dot; // caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot); caffe_gpu_dot(M_ * N_, mat_.gpu_data(), pa.gpu_data(), &dot); Dtype loss = dot / M_; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int nthreads = N_; caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_gpu_data()); hipLaunchKernelGGL(( Compute_center_diff_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), variation_sum_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff()); if (propagate_down[0]) { // caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, // distance_.gpu_data(), bottom[0]->mutable_gpu_diff()); Blob<Dtype> scale; scale.ReshapeLike(mat_); // scale: lij / ||xi - cj|| caffe_gpu_div(mat_.count(), lij_.gpu_data(), distance_norm_mat_.gpu_data(), scale.mutable_gpu_data()); // caffe_copy(mat_.count(), lij_.cpu_data(), scale.mutable_cpu_data()); // Iij * lij / ||xi - cj|| caffe_gpu_mul(mat_.count(), mat_.gpu_data(), scale.gpu_data(), scale.mutable_gpu_data()); Blob<Dtype> matI; matI.ReshapeLike(distance_mat_); // [mat_ * scale, mat_ * scale, ..., mat_ * scale] caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * N_, K_, 1, 1, scale.gpu_data(), Im_.gpu_data(), 0., matI.mutable_gpu_data()); // (xi - cj) .* [mat_ * scale, mat_ * scale, ..., mat_ * scale] caffe_gpu_mul(distance_mat_.count(), distance_mat_.gpu_data(), matI.gpu_data(), matI.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, M_ * N_, 1, Ib_.gpu_data(), matI.gpu_data(), 0., bottom[0]->mutable_gpu_diff()); caffe_gpu_scal(M_ * K_, top[0]->cpu_diff()[0] / M_, bottom[0]->mutable_gpu_diff()); } if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } } INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer); }
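/* Hedged sketch: a host-side reading of the active Forward_gpu above. For each
   sample i and centre j it forms
       pa_ij = l_ij * (||x_i - c_j|| - 1) + margin,  with l_ij = +1 if label_i == j else -1,
   and sums pa_ij over the positions where pa_ij > 0 (mat_ is that indicator),
   divided by the batch size M. The reference below mirrors that arithmetic for
   checking purposes; it is not the Caffe layer itself and the sample values
   are made up. */
#include <cmath>
#include <cstdio>
#include <vector>

float centerMarginLossReference(const std::vector<float> &x,      // M x K features
                                const std::vector<float> &c,      // N x K centres
                                const std::vector<int> &label,    // M labels
                                int M, int N, int K, float margin)
{
    double loss = 0.0;
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            double sq = 0.0;
            for (int k = 0; k < K; ++k) {
                const double d = x[i * K + k] - c[j * K + k];
                sq += d * d;
            }
            const double lij = (label[i] == j) ? 1.0 : -1.0;
            const double pa  = lij * (std::sqrt(sq) - 1.0) + margin;
            if (pa > 0.0) loss += pa;          // mat_ij acts as the indicator
        }
    }
    return static_cast<float>(loss / M);
}

int main()
{
    // Two 2-D samples, two centres, margin 0.5; expected loss is 1.0 because
    // sample 1 is both too close to centre 0 and too far from its own centre 1.
    std::vector<float> x     = {0.0f, 0.0f, 0.5f, 0.0f};
    std::vector<float> c     = {0.0f, 0.0f, 2.0f, 0.0f};
    std::vector<int>   label = {0, 1};
    std::printf("loss = %f\n", centerMarginLossReference(x, c, label, 2, 2, 2, 0.5f));
    return 0;
}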
f66a09a14b1c1291d5805e50cd2ed41d48062919.cu
/*#include <vector>*/ /*#include "caffe/filler.hpp"*/ /*#include "caffe/layers/center_loss_layer.hpp"*/ /*#include "caffe/util/math_functions.hpp"*/ /*namespace caffe {*/ /*template <typename Dtype>*/ /*__global__ void Compute_distance_cy_data_gpu(int nthreads, const int K, const Dtype* bottom,*/ /*const Dtype* label, const Dtype* center, Dtype* distance, Dtype* cy) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*int m = index / K;*/ /*int k = index % K;*/ /*const int label_value = static_cast<int>(label[m]);*/ /*// distance(i) = x(i) - c_{y(i)}*/ /*distance[index] = bottom[index] - center[label_value * K + k];*/ /*// c_y(i) = c_{y(i)}*/ /*cy[index] = center[label_value * K + k];*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*__global__ void Compute_lij_data_gpu(int nthreads, const int M, const Dtype* label,*/ /*Dtype* lij) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*int i = index / M;*/ /*int j = index % M;*/ /*const int label_value_i = static_cast<int>(label[i]);*/ /*const int label_value_j = static_cast<int>(label[j]);*/ /*if (label_value_i == label_value_j) {*/ /*lij[index] = 1;*/ /*}*/ /*else {*/ /*lij[index] = -1;*/ /*}*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*__global__ void Compute_mat_data_gpu(int nthreads, const Dtype* pa,*/ /*Dtype* mat) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*if (pa[index] > 0) {*/ /*mat[index] = 1;*/ /*}*/ /*else {*/ /*mat[index] = 0;*/ /*}*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, */ /*const Dtype* label, const Dtype* distance, Dtype* variation_sum, */ /*Dtype* center_diff) {*/ /*CUDA_KERNEL_LOOP(index, nthreads) {*/ /*int count = 0;*/ /*for (int m = 0; m < M; m++) {*/ /*const int label_value = static_cast<int>(label[m]);*/ /*if (label_value == index) {*/ /*count++;*/ /*for (int k = 0; k < K; k++) {*/ /*variation_sum[index * K + k] -= distance[m * K + k];*/ /*}*/ /*}*/ /*}*/ /*for (int k = 0; k < K; k++) {*/ /*center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.);*/ /*}*/ /*}*/ /*}*/ /*template <typename Dtype>*/ /*void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,*/ /*const vector<Blob<Dtype>*>& top) {*/ /*int nthreads = M_ * K_;*/ /*Blob<Dtype> cy;*/ /*cy.ReshapeLike(*bottom[0]);*/ /*Compute_distance_cy_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(),*/ /*this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data(), cy.mutable_gpu_data());*/ /*nthreads = M_ * M_; */ /*Compute_lij_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, bottom[1]->gpu_data(),*/ /*lij_.mutable_gpu_data());*/ /*Blob<Dtype> xi, cyj;*/ /*xi.ReshapeLike(distance_mat_);*/ /*cyj.ReshapeLike(distance_mat_);*/ /*// cyj = [cy; cy; ...; cy]*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, M_ * K_, 1, 1, Ic_.gpu_data(), cy.gpu_data(), 0., cyj.mutable_gpu_data());*/ /*// xi = [x0; x0; ...; x0; x1; x1; ...; x1; x(M - 1); x(M - 1); ...; x(M - 1)]*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, M_ * K_, K_, 1, bottom[0]->gpu_data(), If_.gpu_data(), 0., xi.mutable_gpu_data());*/ /*// xi - cyj*/ /*caffe_gpu_sub(distance_mat_.count(), xi.gpu_data(), cyj.gpu_data(), distance_mat_.mutable_gpu_data());*/ /*Blob<Dtype> distance_square;*/ /*distance_square.ReshapeLike(distance_mat_);*/ /*// (xi - cyj) .* (xi - cyj)*/ /*caffe_gpu_powx(distance_mat_.count(), distance_mat_.gpu_data(), Dtype(2), 
distance_square.mutable_gpu_data());*/ /*Blob<Dtype> pa;*/ /*pa.ReshapeLike(mat_);*/ /*// (xi - cyj)T(xi - cyj)*/ /*caffe_gpu_gemv<Dtype>(CblasNoTrans, M_ * M_, K_, 1., distance_square.gpu_data(), Im_.gpu_data(), 0., distance_norm_mat_.mutable_gpu_data());*/ /*// ||xi - cyj||*/ /*caffe_gpu_powx(mat_.count(), distance_norm_mat_.gpu_data(), Dtype(0.5), distance_norm_mat_.mutable_gpu_data());*/ /*// ||xi - cyj|| - 1*/ /*caffe_gpu_sub(mat_.count(), distance_norm_mat_.gpu_data(), Ia_.gpu_data(), pa.mutable_gpu_data());*/ /*// lij(||xi - cyj|| - 1)*/ /*caffe_gpu_mul(mat_.count(), lij_.gpu_data(), pa.gpu_data(), pa.mutable_gpu_data());*/ /*// lij(||xi - cyj|| - 1) + alpha*/ /*caffe_gpu_axpy(mat_.count(), margin_, Ia_.gpu_data(), pa.mutable_gpu_data());*/ /*Compute_mat_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, pa.gpu_data(),*/ /*mat_.mutable_gpu_data());*/ /*Dtype dot;*/ /*// caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot);*/ /*caffe_gpu_dot(M_ * M_, mat_.gpu_data(), pa.gpu_data(), &dot);*/ /*Dtype loss = dot / M_ / Dtype(2);*/ /*top[0]->mutable_gpu_data()[0] = loss;*/ /*}*/ /*template <typename Dtype>*/ /*void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,*/ /*const vector<bool>& propagate_down,*/ /*const vector<Blob<Dtype>*>& bottom) {*/ /*int nthreads = N_;*/ /*caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data());*/ /*Compute_center_diff_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),*/ /*CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), */ /*variation_sum_.mutable_cpu_data(), this->blobs_[0]->mutable_gpu_diff());*/ /*if (propagate_down[0]) {*/ /*// caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, */ /*// distance_.gpu_data(), bottom[0]->mutable_gpu_diff());*/ /*Blob<Dtype> scale;*/ /*scale.ReshapeLike(mat_);*/ /*// scale: lij / ||xi - cyj||*/ /*caffe_gpu_div(mat_.count(), lij_.gpu_data(), distance_norm_mat_.gpu_data(), scale.mutable_gpu_data());*/ /*// caffe_copy(mat_.count(), lij_.cpu_data(), scale.mutable_cpu_data());*/ /*// Iij * lij / ||xi - cyj||*/ /*caffe_gpu_mul(mat_.count(), mat_.gpu_data(), scale.gpu_data(), scale.mutable_gpu_data());*/ /*Blob<Dtype> matI;*/ /*matI.ReshapeLike(distance_mat_);*/ /*// [mat_ * scale, mat_ * scale, ..., mat_ * scale]*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * M_, K_, 1, 1, scale.gpu_data(), Im_.gpu_data(), 0., matI.mutable_gpu_data());*/ /*// (xi - cyj) .* [mat_ * scale, mat_ * scale, ..., mat_ * scale]*/ /*caffe_gpu_mul(distance_mat_.count(), distance_mat_.gpu_data(), matI.gpu_data(), matI.mutable_gpu_data());*/ /*caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, M_ * M_, 1, Ib_.gpu_data(), matI.gpu_data(), 0., bottom[0]->mutable_gpu_diff());*/ /*caffe_gpu_scal(M_ * K_, top[0]->gpu_diff()[0] / (2 * M_), bottom[0]->mutable_gpu_diff());*/ /*}*/ /*if (propagate_down[1]) {*/ /*LOG(FATAL) << this->type()*/ /*<< " Layer cannot backpropagate to label inputs.";*/ /*}*/ /*}*/ /*INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer);*/ /*} // namespace caffe*/ #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/center_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include <iostream> namespace caffe { template <typename Dtype> __global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom, const Dtype* label, const Dtype* center, Dtype* distance) { CUDA_KERNEL_LOOP(index, nthreads) { int m = index / K; int k = index % K; const int label_value = 
static_cast<int>(label[m]); // distance(i) = x(i) - c_{y(i)} distance[index] = bottom[index] - center[label_value * K + k]; } } template <typename Dtype> __global__ void Compute_lij_data_gpu(int nthreads, const int N, const Dtype* label, Dtype* lij) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index / N; int j = index % N; const int label_value = static_cast<int>(label[i]); if (label_value == j) { lij[index] = 1; } else { lij[index] = -1; } } } template <typename Dtype> __global__ void Compute_distance_mat_data_gpu(int nthreads, const int N, const int K, const Dtype* bottom, const Dtype* center, Dtype* distance_mat) { CUDA_KERNEL_LOOP(index, nthreads) { int m = index / K / N; int n = index / K % N; int k = index % K; distance_mat[index] = bottom[m * K + k] - center[n * K + k]; } } template <typename Dtype> __global__ void Compute_mat_data_gpu(int nthreads, const Dtype* pa, Dtype* mat) { CUDA_KERNEL_LOOP(index, nthreads) { if (pa[index] > 0) { mat[index] = 1; } else { mat[index] = 0; } } } template <typename Dtype> __global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, const Dtype* label, const Dtype* distance, Dtype* variation_sum, Dtype* center_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int count = 0; for (int m = 0; m < M; m++) { const int label_value = static_cast<int>(label[m]); if (label_value == index) { count++; for (int k = 0; k < K; k++) { variation_sum[index * K + k] -= distance[m * K + k]; } } } for (int k = 0; k < K; k++) { center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.); } } } template <typename Dtype> void CenterLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int nthreads = M_ * K_; Compute_distance_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(), this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data()); nthreads = M_ * N_; Compute_lij_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, bottom[1]->gpu_data(), lij_.mutable_gpu_data()); nthreads = M_ * N_ * K_; Compute_distance_mat_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, bottom[0]->gpu_data(), this->blobs_[0]->gpu_data(), distance_mat_.mutable_gpu_data()); Blob<Dtype> distance_square; distance_square.ReshapeLike(distance_mat_); // (xi - cj) .* (xi - cj) caffe_gpu_powx(distance_mat_.count(), distance_mat_.gpu_data(), Dtype(2), distance_square.mutable_gpu_data()); Blob<Dtype> pa; pa.ReshapeLike(mat_); // (xi - cj)T(xi - cj) caffe_gpu_gemv<Dtype>(CblasNoTrans, M_ * N_, K_, 1., distance_square.gpu_data(), Im_.gpu_data(), 0., distance_norm_mat_.mutable_gpu_data()); // ||xi - cj|| caffe_gpu_powx(mat_.count(), distance_norm_mat_.gpu_data(), Dtype(0.5), distance_norm_mat_.mutable_gpu_data()); // ||xi - cj|| - 1 caffe_gpu_sub(mat_.count(), distance_norm_mat_.gpu_data(), Ia_.gpu_data(), pa.mutable_gpu_data()); // lij(||xi - cj|| - 1) caffe_gpu_mul(mat_.count(), lij_.gpu_data(), pa.gpu_data(), pa.mutable_gpu_data()); // lij(||xi - cj|| - 1) + alpha caffe_gpu_axpy(mat_.count(), margin_, Ia_.gpu_data(), pa.mutable_gpu_data()); nthreads = M_ * N_; Compute_mat_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, pa.gpu_data(), mat_.mutable_gpu_data()); Dtype dot; // caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot); caffe_gpu_dot(M_ * N_, mat_.gpu_data(), pa.gpu_data(), &dot); Dtype loss = dot / M_; 
top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CenterLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int nthreads = N_; caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_gpu_data()); Compute_center_diff_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), variation_sum_.mutable_gpu_data(), this->blobs_[0]->mutable_gpu_diff()); if (propagate_down[0]) { // caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, // distance_.gpu_data(), bottom[0]->mutable_gpu_diff()); Blob<Dtype> scale; scale.ReshapeLike(mat_); // scale: lij / ||xi - cj|| caffe_gpu_div(mat_.count(), lij_.gpu_data(), distance_norm_mat_.gpu_data(), scale.mutable_gpu_data()); // caffe_copy(mat_.count(), lij_.cpu_data(), scale.mutable_cpu_data()); // Iij * lij / ||xi - cj|| caffe_gpu_mul(mat_.count(), mat_.gpu_data(), scale.gpu_data(), scale.mutable_gpu_data()); Blob<Dtype> matI; matI.ReshapeLike(distance_mat_); // [mat_ * scale, mat_ * scale, ..., mat_ * scale] caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_ * N_, K_, 1, 1, scale.gpu_data(), Im_.gpu_data(), 0., matI.mutable_gpu_data()); // (xi - cj) .* [mat_ * scale, mat_ * scale, ..., mat_ * scale] caffe_gpu_mul(distance_mat_.count(), distance_mat_.gpu_data(), matI.gpu_data(), matI.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, M_ * N_, 1, Ib_.gpu_data(), matI.gpu_data(), 0., bottom[0]->mutable_gpu_diff()); caffe_gpu_scal(M_ * K_, top[0]->cpu_diff()[0] / M_, bottom[0]->mutable_gpu_diff()); } if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } } INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer); }
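// Note: the Caffe kernels in the record above iterate via CUDA_KERNEL_LOOP,
// which in stock Caffe expands to a grid-stride loop. A minimal standalone
// sketch of that pattern (illustrative only -- the macro, kernel and variable
// names below are made up and are not part of the files above):
#include <cstdio>
#include <cuda_runtime.h>

#define KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

// distance[m*K+k] = x[m*K+k] - center[label[m]*K + k], mirroring the indexing of
// Compute_distance_data_gpu above (labels kept as int here for simplicity).
__global__ void distance_kernel(int nthreads, int K, const float* x,
                                const int* label, const float* center,
                                float* distance) {
  KERNEL_LOOP(index, nthreads) {
    int m = index / K;
    int k = index % K;
    distance[index] = x[index] - center[label[m] * K + k];
  }
}

int main() {
  const int M = 4, N = 3, K = 8, n = M * K;
  float hx[n], hc[N * K], hd[n];
  int hl[M] = {0, 2, 1, 2};
  for (int i = 0; i < n; ++i) hx[i] = 0.1f * i;
  for (int i = 0; i < N * K; ++i) hc[i] = 0.01f * i;
  float *dx, *dc, *dd; int *dl;
  cudaMalloc(&dx, n * sizeof(float));
  cudaMalloc(&dc, N * K * sizeof(float));
  cudaMalloc(&dd, n * sizeof(float));
  cudaMalloc(&dl, M * sizeof(int));
  cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dc, hc, N * K * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dl, hl, M * sizeof(int), cudaMemcpyHostToDevice);
  distance_kernel<<<2, 32>>>(n, K, dx, dl, dc, dd);
  cudaMemcpy(hd, dd, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("distance[0]=%f (expect %f)\n", hd[0], hx[0] - hc[hl[0] * K + 0]);
  cudaFree(dx); cudaFree(dc); cudaFree(dd); cudaFree(dl);
  return 0;
}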
7aba4d5a8be88e941168e17015640512c5eed6b3.hip
// !!! This is a file automatically generated by hipify!!! //This code is a modification of microbenchmarks from //"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf //This benchmark measures the maximum read bandwidth of shared memory for 32 bit read //This code have been tested on Volta V100 architecture #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define SHARED_MEM_SIZE_BYTE (48*1024) //size in bytes, max 96KB for v100 #define SHARED_MEM_SIZE (SHARED_MEM_SIZE_BYTE/4) //#define SHARED_MEM_SIZE (16384) #define ITERS (4096) #define BLOCKS_NUM 1 #define THREADS_PER_BLOCK 1024 #define WARP_SIZE 32 #define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM) // GPU error check #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){ if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void shared_bw(uint32_t *startClk, uint32_t *stopClk, uint32_t *dsink, uint32_t stride){ // thread index uint32_t tid = threadIdx.x; uint32_t bid = blockIdx.x; uint32_t uid = bid*blockDim.x+tid; uint32_t n_threads = blockDim.x * gridDim.x; // a register to avoid compiler optimization //uint32_t sink0 = 0; register uint32_t tmp = uid; uint32_t start = 0; uint32_t stop = 0; __shared__ uint32_t s[SHARED_MEM_SIZE]; //static shared memory //uint32_t s[SHARED_MEM_SIZE]; // one thread to initialize the pointer-chasing array for (uint32_t i=uid; i<(SHARED_MEM_SIZE); i+=n_threads) s[i] = (i+stride)%SHARED_MEM_SIZE; // synchronize all threads asm volatile ("bar.sync 0;"); // start timing asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory"); // load data from l1 cache and accumulate for(uint32_t i=0; i<ITERS; ++i){ tmp = s[tmp]; } // synchronize all threads asm volatile("bar.sync 0;"); // stop timing asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory"); //sink0 = tmp; // write time and data back to memory startClk[uid] = start; stopClk[uid] = stop; dsink[uid] = tmp; } int main(){ uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t)); uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t)); uint32_t *dsink = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t)); uint32_t *startClk_g; uint32_t *stopClk_g; uint32_t *dsink_g; gpuErrchk( hipMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) ); gpuErrchk( hipMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) ); gpuErrchk( hipMalloc(&dsink_g, TOTAL_THREADS*sizeof(uint32_t)) ); hipLaunchKernelGGL(( shared_bw), dim3(BLOCKS_NUM),dim3(THREADS_PER_BLOCK), 0, 0, startClk_g, stopClk_g, dsink_g, 1024); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) ); double bw; bw = (double)(ITERS*TOTAL_THREADS*4)/((double)(stopClk[0]-startClk[0])); printf("Shared Memory Bandwidth = %f (byte/clk/SM)\n", bw); printf("Total Clk number = %u \n", stopClk[0]-startClk[0]); return 0; }
7aba4d5a8be88e941168e17015640512c5eed6b3.cu
//This code is a modification of microbenchmarks from //"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf //This benchmark measures the maximum read bandwidth of shared memory for 32 bit read //This code have been tested on Volta V100 architecture #include <stdio.h> #include <stdlib.h> #include <cuda.h> #define SHARED_MEM_SIZE_BYTE (48*1024) //size in bytes, max 96KB for v100 #define SHARED_MEM_SIZE (SHARED_MEM_SIZE_BYTE/4) //#define SHARED_MEM_SIZE (16384) #define ITERS (4096) #define BLOCKS_NUM 1 #define THREADS_PER_BLOCK 1024 #define WARP_SIZE 32 #define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM) // GPU error check #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){ if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void shared_bw(uint32_t *startClk, uint32_t *stopClk, uint32_t *dsink, uint32_t stride){ // thread index uint32_t tid = threadIdx.x; uint32_t bid = blockIdx.x; uint32_t uid = bid*blockDim.x+tid; uint32_t n_threads = blockDim.x * gridDim.x; // a register to avoid compiler optimization //uint32_t sink0 = 0; register uint32_t tmp = uid; uint32_t start = 0; uint32_t stop = 0; __shared__ uint32_t s[SHARED_MEM_SIZE]; //static shared memory //uint32_t s[SHARED_MEM_SIZE]; // one thread to initialize the pointer-chasing array for (uint32_t i=uid; i<(SHARED_MEM_SIZE); i+=n_threads) s[i] = (i+stride)%SHARED_MEM_SIZE; // synchronize all threads asm volatile ("bar.sync 0;"); // start timing asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory"); // load data from l1 cache and accumulate for(uint32_t i=0; i<ITERS; ++i){ tmp = s[tmp]; } // synchronize all threads asm volatile("bar.sync 0;"); // stop timing asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory"); //sink0 = tmp; // write time and data back to memory startClk[uid] = start; stopClk[uid] = stop; dsink[uid] = tmp; } int main(){ uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t)); uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t)); uint32_t *dsink = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t)); uint32_t *startClk_g; uint32_t *stopClk_g; uint32_t *dsink_g; gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) ); gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) ); gpuErrchk( cudaMalloc(&dsink_g, TOTAL_THREADS*sizeof(uint32_t)) ); shared_bw<<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, dsink_g, 1024); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); double bw; bw = (double)(ITERS*TOTAL_THREADS*4)/((double)(stopClk[0]-startClk[0])); printf("Shared Memory Bandwidth = %f (byte/clk/SM)\n", bw); printf("Total Clk number = %u \n", stopClk[0]-startClk[0]); return 0; }
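// The benchmark pair above reports shared-memory read bandwidth in bytes per
// clock per SM. A small host-only helper (an illustrative addition, not part of
// the benchmark files) that converts such a figure into an approximate aggregate
// GB/s number using the SM count and core clock reported by
// cudaGetDeviceProperties; the 128.0 below is a placeholder to be replaced with
// the benchmark's printed value:
#include <stdio.h>
#include <cuda_runtime.h>

int main() {
  double byte_per_clk_per_sm = 128.0;  // placeholder: paste the measured value here
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  // clockRate is reported in kHz; scale per-SM bandwidth by the number of SMs.
  double bytes_per_sec = byte_per_clk_per_sm * (double)prop.clockRate * 1e3
                         * (double)prop.multiProcessorCount;
  printf("%s: %d SMs @ %.0f MHz -> ~%.1f GB/s aggregate shared-memory read BW\n",
         prop.name, prop.multiProcessorCount, prop.clockRate / 1e3,
         bytes_per_sec / 1e9);
  return 0;
}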
68ad76b620f33472a51241abadf9e749930e0004.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "filler.hpp" #include "inner_distance_layer.hpp" #include "math_functions.hpp" namespace caffe { #define sign(x) (Dtype(0) < (x)) - ((x) < Dtype(0)) template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num) { Dtype dot = 0; for (int d = 0; d < dim; ++d) { dot += data_1[index * dim + d] * data_2[index * dim + d]; } channel_dot[index] = dot; } } template <typename Dtype> __global__ void kernel_channel_scal(const int num, const int dim, const Dtype* norm_data, Dtype* input_output_data) { CUDA_KERNEL_LOOP(index, num * dim) { int n = index / dim; input_output_data[index] *= norm_data[n]; } } template <typename Dtype> __global__ void inner_distance_forward_L2(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, Dtype* top_data) { CUDA_KERNEL_LOOP(index, M_ * N_) { int m = index / N_; int n = index % N_; Dtype sum = Dtype(0); for (int k = 0; k < K_; ++k) { sum += (bottom_data[m * K_ + k] - weight[n * K_ + k]) * (bottom_data[m * K_ + k] - weight[n * K_ + k]); } top_data[index] = sum; } } template <typename Dtype> __global__ void inner_distance_forward_L1(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, Dtype* top_data) { CUDA_KERNEL_LOOP(index, M_ * N_) { int m = index / N_; int n = index % N_; Dtype sum = Dtype(0); for (int k = 0; k < K_; ++k) { sum += abs(bottom_data[m * K_ + k] - weight[n * K_ + k]); } top_data[index] = sum; } } template <typename Dtype> void InnerDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = bottom.size() >= 2 ? 
bottom[1]->gpu_data() : this->blobs_[0]->gpu_data(); if (normalize_ && bottom.size() == 1) { Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weight_norm_data = weight_norm_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_), CAFFE_CUDA_NUM_THREADS >> > (N_, K_, weight, weight, weight_norm_data); caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_), CAFFE_CUDA_NUM_THREADS >> > (N_, K_, weight_norm_data, mutable_weight); } if (distance_type_ == "L2") { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( inner_distance_forward_L2<Dtype>) , dim3(CAFFE_GET_BLOCKS(M_ * N_)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, M_, N_, K_, bottom_data, weight, top_data); } else if (distance_type_ == "L1") { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( inner_distance_forward_L1<Dtype>) , dim3(CAFFE_GET_BLOCKS(M_ * N_)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, M_, N_, K_, bottom_data, weight, top_data); } else { NOT_IMPLEMENTED; } } template <typename Dtype> __global__ void inner_distance_backward_L2(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, M_ * K_) { int m = index / K_; int k = index % K_; for (int n = 0; n < N_; ++n) { bottom_diff[index] += top_diff[m * N_ + n] * (bottom_data[m * K_ + k] - weight[n * K_ + k]) * Dtype(2); } } } template <typename Dtype> __global__ void inner_distance_backward_L1(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, M_ * K_) { int m = index / K_; int k = index % K_; for (int n = 0; n < N_; ++n) { bottom_diff[index] += top_diff[m * N_ + n] * sign(bottom_data[m * K_ + k] - weight[n * K_ + k]); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L2(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, N_ * K_) { int n = index / K_; int k = index % K_; for (int m = 0; m < M_; ++m) { weight_diff[index] += top_diff[m * N_ + n] * (weight[index] - bottom_data[m * K_ + k]) * Dtype(2); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L2_center_only(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* label_data, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, K_) { int k = index; for (int m = 0; m < M_; ++m) { int n = static_cast<int>(label_data[m]); weight_diff[n * K_ + k] += top_diff[m * N_ + n] * (weight[n * K_ + k] - bottom_data[m * K_ + k]) * Dtype(2); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L1(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, N_ * K_) { int n = index / K_; int k = index % K_; for (int m = 0; m < M_; ++m) { weight_diff[index] += top_diff[m * N_ + n] * sign(weight[index] - bottom_data[m * K_ + k]); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L1_center_only(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* label_data, const Dtype* top_diff, Dtype* weight_diff) { 
CUDA_KERNEL_LOOP(index, K_) { int k = index; for (int m = 0; m < M_; ++m) { int n = static_cast<int>(label_data[m]); weight_diff[n * K_ + k] += top_diff[m * N_ + n] * sign(weight[n * K_ + k] - bottom_data[m * K_ + k]); } } } INSTANTIATE_LAYER_GPU_FUNCS(InnerDistanceLayer); } // namespace caffe
68ad76b620f33472a51241abadf9e749930e0004.cu
#include <vector> #include "filler.hpp" #include "inner_distance_layer.hpp" #include "math_functions.hpp" namespace caffe { #define sign(x) (Dtype(0) < (x)) - ((x) < Dtype(0)) template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num) { Dtype dot = 0; for (int d = 0; d < dim; ++d) { dot += data_1[index * dim + d] * data_2[index * dim + d]; } channel_dot[index] = dot; } } template <typename Dtype> __global__ void kernel_channel_scal(const int num, const int dim, const Dtype* norm_data, Dtype* input_output_data) { CUDA_KERNEL_LOOP(index, num * dim) { int n = index / dim; input_output_data[index] *= norm_data[n]; } } template <typename Dtype> __global__ void inner_distance_forward_L2(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, Dtype* top_data) { CUDA_KERNEL_LOOP(index, M_ * N_) { int m = index / N_; int n = index % N_; Dtype sum = Dtype(0); for (int k = 0; k < K_; ++k) { sum += (bottom_data[m * K_ + k] - weight[n * K_ + k]) * (bottom_data[m * K_ + k] - weight[n * K_ + k]); } top_data[index] = sum; } } template <typename Dtype> __global__ void inner_distance_forward_L1(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, Dtype* top_data) { CUDA_KERNEL_LOOP(index, M_ * N_) { int m = index / N_; int n = index % N_; Dtype sum = Dtype(0); for (int k = 0; k < K_; ++k) { sum += abs(bottom_data[m * K_ + k] - weight[n * K_ + k]); } top_data[index] = sum; } } template <typename Dtype> void InnerDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = bottom.size() >= 2 ? 
bottom[1]->gpu_data() : this->blobs_[0]->gpu_data(); if (normalize_ && bottom.size() == 1) { Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weight_norm_data = weight_norm_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_), CAFFE_CUDA_NUM_THREADS >> > (N_, K_, weight, weight, weight_norm_data); caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_), CAFFE_CUDA_NUM_THREADS >> > (N_, K_, weight_norm_data, mutable_weight); } if (distance_type_ == "L2") { // NOLINT_NEXT_LINE(whitespace/operators) inner_distance_forward_L2<Dtype> <<<CAFFE_GET_BLOCKS(M_ * N_), CAFFE_CUDA_NUM_THREADS >>> (M_, N_, K_, bottom_data, weight, top_data); } else if (distance_type_ == "L1") { // NOLINT_NEXT_LINE(whitespace/operators) inner_distance_forward_L1<Dtype> <<<CAFFE_GET_BLOCKS(M_ * N_), CAFFE_CUDA_NUM_THREADS >>> (M_, N_, K_, bottom_data, weight, top_data); } else { NOT_IMPLEMENTED; } } template <typename Dtype> __global__ void inner_distance_backward_L2(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, M_ * K_) { int m = index / K_; int k = index % K_; for (int n = 0; n < N_; ++n) { bottom_diff[index] += top_diff[m * N_ + n] * (bottom_data[m * K_ + k] - weight[n * K_ + k]) * Dtype(2); } } } template <typename Dtype> __global__ void inner_distance_backward_L1(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, M_ * K_) { int m = index / K_; int k = index % K_; for (int n = 0; n < N_; ++n) { bottom_diff[index] += top_diff[m * N_ + n] * sign(bottom_data[m * K_ + k] - weight[n * K_ + k]); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L2(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, N_ * K_) { int n = index / K_; int k = index % K_; for (int m = 0; m < M_; ++m) { weight_diff[index] += top_diff[m * N_ + n] * (weight[index] - bottom_data[m * K_ + k]) * Dtype(2); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L2_center_only(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* label_data, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, K_) { int k = index; for (int m = 0; m < M_; ++m) { int n = static_cast<int>(label_data[m]); weight_diff[n * K_ + k] += top_diff[m * N_ + n] * (weight[n * K_ + k] - bottom_data[m * K_ + k]) * Dtype(2); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L1(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, N_ * K_) { int n = index / K_; int k = index % K_; for (int m = 0; m < M_; ++m) { weight_diff[index] += top_diff[m * N_ + n] * sign(weight[index] - bottom_data[m * K_ + k]); } } } template <typename Dtype> __global__ void inner_distance_weight_backward_L1_center_only(const int M_, const int N_, const int K_, const Dtype* bottom_data, const Dtype* weight, const Dtype* label_data, const Dtype* top_diff, Dtype* weight_diff) { CUDA_KERNEL_LOOP(index, K_) { int k = index; for (int m = 0; m < M_; ++m) { int n = 
static_cast<int>(label_data[m]); weight_diff[n * K_ + k] += top_diff[m * N_ + n] * sign(weight[n * K_ + k] - bottom_data[m * K_ + k]); } } } INSTANTIATE_LAYER_GPU_FUNCS(InnerDistanceLayer); } // namespace caffe
8dbc1a55a096077a6d42b3b637559316a501e321.hip
// !!! This is a file automatically generated by hipify!!! /* Ye Wang CPEG655 lab2 problem 2.a */ #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <sys/time.h> __global__ void matrixMul_2a(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N); void mm(float * C, float * A, float * B, int N); float GetRand(int seed); void randomInit(float *data, int size, float val); void constantInit(float *data, int size, float val); int matrixMultiply(int tile_size, int block_size, dim3 &dimsA, dim3 &dimsB); int main(int argc, char **argv) { int block_size = 1; int tile_size=1; int N=1024; dim3 dimsA(N,N); dim3 dimsB(N,N); //process of finding the best NB and NT // for(int i=0;i<4;i++,block_size*=2) // { // tile_size=1; // for(int j=0;j<5;j++,tile_size*=2){ // matrixMultiply(tile_size, block_size, dimsA, dimsB); // } // } block_size = 16; tile_size=1; matrixMultiply(tile_size, block_size, dimsA, dimsB); return 0; } //BLOCK_SIZE=width / GridDim(number of blocks in a dimention) __global__ void matrixMul_1a(float *C, float *A, float *B, int N) { // Thread index int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.f; for (int n=0; n<N-1; n++){ sum += A[ty*N+n]*B[n*N+tx]; } C[ty*N+tx] = sum; } __global__ void matrixMul_1b(int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x+bx*BLOCK_SIZE; int ty = threadIdx.y+by*BLOCK_SIZE; float Csub = 0; for (int i= 0; i < N; i++) { Csub+=(A[ty*N+i]*B[tx+N*i]); } C[N * ty + tx] = Csub; //__syncthreads(); } __global__ void matrixMul_2a(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE; int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE; int tx=Astart; int ty=Bstart; float Csub = 0; for(int k=0;k<TILE_SIZE;k++,ty++) { tx=Astart; for(int j=0;j<TILE_SIZE;j++, tx++) { Csub = 0; for (int i= 0; i < N; i++) { Csub +=A[ty*N+i]*B[tx+N*i];// a*b; } C[N * ty + tx] =Csub; } } } __global__ void matrixMul_2b_pragma(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE; int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE; int tx=Astart; int ty=Bstart; float Csub = 0; for(int k=0;k<TILE_SIZE;k++,ty++) { tx=Astart; for(int j=0;j<TILE_SIZE;j++, tx++) { Csub = 0; #pragma unroll 8 for (int i= 0; i < N; i++) { Csub +=A[ty*N+i]*B[tx+N*i];// a*b; } C[N * ty + tx] =Csub; } } } __global__ void matrixMul_2b_manual(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE; int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE; int tx=Astart; int ty=Bstart; float Csub = 0; for(int k=0;k<TILE_SIZE;k++,ty++) { tx=Astart; for(int j=0;j<TILE_SIZE;j++, tx++) { Csub = 0; for (int i= 0; i < N; i+=8) { int px=ty*N+i; int py=tx+N*i; Csub +=A[px]*B[py]; Csub +=A[px+1]*B[py+N]; Csub +=A[px+2]*B[py+2*N]; Csub +=A[px+3]*B[py+3*N]; Csub +=A[px+4]*B[py+4*N]; Csub +=A[px+5]*B[py+5*N]; Csub +=A[px+6]*B[py+6*N]; Csub +=A[px+7]*B[py+7*N]; } C[N * ty + tx] =Csub; } } } void mm(float * C, float * A, float * B, int N) { int i,j,k; float sum=0; for(j=0;j<N;j++) for(i=0;i<N;i++){ C[i*N+j]=0; sum=0; for(k=0;k<N;k++) 
sum+=A[i*N+k]*B[k*N+j]; C[i*N+j]=sum; } } float GetRand(int seed) { struct timeval tv; gettimeofday(&tv,NULL); srand(tv.tv_usec%17+seed); // printf("xxxPacket_loss_rate:Random %f\n",(rand()% 1000) / 1000.0); return((rand()% 1000) / 1.02); } void randomInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = GetRand(i); } } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val;//+i%2; } } int matrixMultiply(int tile_size, int block_size, dim3 &dimsA, dim3 &dimsB) { printf("START: Tile[%d,%d],Block[%d,%d], Matrix[%d,%d]\n",tile_size,tile_size,block_size,block_size,dimsB.x,dimsA.y); // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory randomInit(h_A, size_A, 2.1f); randomInit(h_B, size_B, 1.f); unsigned int size_C = dimsB.x * dimsA.y; // Allocate device memory float *d_A, *d_B, *d_C; dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); float *test_C = (float *) malloc(mem_size_C); constantInit(test_C, size_C, 0.f); constantInit(h_C, size_C, 0.f); hipMalloc((void **) &d_A, mem_size_A); hipMalloc((void **) &d_B, mem_size_B); hipMalloc((void **) &d_C, mem_size_C); hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); hipMemcpy(d_C, h_C, mem_size_C, hipMemcpyHostToDevice); // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsA.y/(block_size*tile_size), dimsB.x/(block_size*tile_size)); hipDeviceSynchronize();//////////////////////**** // Allocate CUDA events that we'll use for timing hipEvent_t start; hipEventCreate(&start); hipEvent_t stop; hipEventCreate(&stop); // Record the start event hipEventRecord(start, NULL); // Execute th kernel int nIter = 2; for (int j = 0; j < nIter; j++) { hipLaunchKernelGGL(( matrixMul_2a), dim3(grid), dim3(threads) , 0, 0, tile_size, block_size, d_C, d_A, d_B, dimsA.x); } // Record the stop event hipEventRecord(stop, NULL); hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "REPORT:\n Performance= %.2f GFlop/s\n Time= %.3f msec\n Size= %.0f Ops\n WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy t_C,h_A,h_B,dimsA.x);esult from device to host hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); //double eps = 1.e-6 ; mm(test_C,h_A,h_B,dimsA.x); int verify=1; for (int i=0;i<mem_size_C/4;i++) { if(h_C[i]!=test_C[i]&&(fabs(h_C[i]-test_C[i])/test_C[i])>1E-6){ printf("Matrix[A:%d,B:%d,C:%d] C[%d]=%f, Expect= %f\n",mem_size_A,mem_size_B,mem_size_C,i,h_C[i],test_C[i]); verify=0; break; } } free(h_A); free(test_C); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); hipDeviceReset(); if (verify) { printf("SUCCESS!\n\n"); return true; }else{ printf("WRONG RESULT!\n\n"); return false; } }
8dbc1a55a096077a6d42b3b637559316a501e321.cu
/* Ye Wang CPEG655 lab2 problem 2.a */ #include <stdio.h> #include <assert.h> #include <cuda_runtime.h> #include <sys/time.h> __global__ void matrixMul_2a(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N); void mm(float * C, float * A, float * B, int N); float GetRand(int seed); void randomInit(float *data, int size, float val); void constantInit(float *data, int size, float val); int matrixMultiply(int tile_size, int block_size, dim3 &dimsA, dim3 &dimsB); int main(int argc, char **argv) { int block_size = 1; int tile_size=1; int N=1024; dim3 dimsA(N,N); dim3 dimsB(N,N); //process of finding the best NB and NT // for(int i=0;i<4;i++,block_size*=2) // { // tile_size=1; // for(int j=0;j<5;j++,tile_size*=2){ // matrixMultiply(tile_size, block_size, dimsA, dimsB); // } // } block_size = 16; tile_size=1; matrixMultiply(tile_size, block_size, dimsA, dimsB); return 0; } //BLOCK_SIZE=width / GridDim(number of blocks in a dimention) __global__ void matrixMul_1a(float *C, float *A, float *B, int N) { // Thread index int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.f; for (int n=0; n<N-1; n++){ sum += A[ty*N+n]*B[n*N+tx]; } C[ty*N+tx] = sum; } __global__ void matrixMul_1b(int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x+bx*BLOCK_SIZE; int ty = threadIdx.y+by*BLOCK_SIZE; float Csub = 0; for (int i= 0; i < N; i++) { Csub+=(A[ty*N+i]*B[tx+N*i]); } C[N * ty + tx] = Csub; //__syncthreads(); } __global__ void matrixMul_2a(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE; int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE; int tx=Astart; int ty=Bstart; float Csub = 0; for(int k=0;k<TILE_SIZE;k++,ty++) { tx=Astart; for(int j=0;j<TILE_SIZE;j++, tx++) { Csub = 0; for (int i= 0; i < N; i++) { Csub +=A[ty*N+i]*B[tx+N*i];// a*b; } C[N * ty + tx] =Csub; } } } __global__ void matrixMul_2b_pragma(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE; int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE; int tx=Astart; int ty=Bstart; float Csub = 0; for(int k=0;k<TILE_SIZE;k++,ty++) { tx=Astart; for(int j=0;j<TILE_SIZE;j++, tx++) { Csub = 0; #pragma unroll 8 for (int i= 0; i < N; i++) { Csub +=A[ty*N+i]*B[tx+N*i];// a*b; } C[N * ty + tx] =Csub; } } } __global__ void matrixMul_2b_manual(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE; int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE; int tx=Astart; int ty=Bstart; float Csub = 0; for(int k=0;k<TILE_SIZE;k++,ty++) { tx=Astart; for(int j=0;j<TILE_SIZE;j++, tx++) { Csub = 0; for (int i= 0; i < N; i+=8) { int px=ty*N+i; int py=tx+N*i; Csub +=A[px]*B[py]; Csub +=A[px+1]*B[py+N]; Csub +=A[px+2]*B[py+2*N]; Csub +=A[px+3]*B[py+3*N]; Csub +=A[px+4]*B[py+4*N]; Csub +=A[px+5]*B[py+5*N]; Csub +=A[px+6]*B[py+6*N]; Csub +=A[px+7]*B[py+7*N]; } C[N * ty + tx] =Csub; } } } void mm(float * C, float * A, float * B, int N) { int i,j,k; float sum=0; for(j=0;j<N;j++) for(i=0;i<N;i++){ C[i*N+j]=0; sum=0; for(k=0;k<N;k++) sum+=A[i*N+k]*B[k*N+j]; C[i*N+j]=sum; } } float GetRand(int seed) { struct 
timeval tv; gettimeofday(&tv,NULL); srand(tv.tv_usec%17+seed); // printf("xxxPacket_loss_rate:Random %f\n",(rand()% 1000) / 1000.0); return((rand()% 1000) / 1.02); } void randomInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = GetRand(i); } } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val;//+i%2; } } int matrixMultiply(int tile_size, int block_size, dim3 &dimsA, dim3 &dimsB) { printf("START: Tile[%d,%d],Block[%d,%d], Matrix[%d,%d]\n",tile_size,tile_size,block_size,block_size,dimsB.x,dimsA.y); // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory randomInit(h_A, size_A, 2.1f); randomInit(h_B, size_B, 1.f); unsigned int size_C = dimsB.x * dimsA.y; // Allocate device memory float *d_A, *d_B, *d_C; dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); float *test_C = (float *) malloc(mem_size_C); constantInit(test_C, size_C, 0.f); constantInit(h_C, size_C, 0.f); cudaMalloc((void **) &d_A, mem_size_A); cudaMalloc((void **) &d_B, mem_size_B); cudaMalloc((void **) &d_C, mem_size_C); cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_C, mem_size_C, cudaMemcpyHostToDevice); // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsA.y/(block_size*tile_size), dimsB.x/(block_size*tile_size)); cudaDeviceSynchronize();//////////////////////**** // Allocate CUDA events that we'll use for timing cudaEvent_t start; cudaEventCreate(&start); cudaEvent_t stop; cudaEventCreate(&stop); // Record the start event cudaEventRecord(start, NULL); // Execute th kernel int nIter = 2; for (int j = 0; j < nIter; j++) { matrixMul_2a<<< grid, threads >>>(tile_size, block_size, d_C, d_A, d_B, dimsA.x); } // Record the stop event cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "REPORT:\n Performance= %.2f GFlop/s\n Time= %.3f msec\n Size= %.0f Ops\n WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy t_C,h_A,h_B,dimsA.x);esult from device to host cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); //double eps = 1.e-6 ; mm(test_C,h_A,h_B,dimsA.x); int verify=1; for (int i=0;i<mem_size_C/4;i++) { if(h_C[i]!=test_C[i]&&(fabs(h_C[i]-test_C[i])/test_C[i])>1E-6){ printf("Matrix[A:%d,B:%d,C:%d] C[%d]=%f, Expect= %f\n",mem_size_A,mem_size_B,mem_size_C,i,h_C[i],test_C[i]); verify=0; break; } } free(h_A); free(test_C); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaDeviceReset(); if (verify) { printf("SUCCESS!\n\n"); return true; }else{ printf("WRONG RESULT!\n\n"); return false; } }
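// The kernels in the lab file above read A and B directly from global memory on
// every multiply-add. A common follow-up for this kind of exercise is a
// shared-memory tiled kernel; a minimal sketch, assuming N is a multiple of the
// tile width (illustrative only, not part of the original lab code):
#include <stdio.h>
#include <cuda_runtime.h>

#define TILE 16

__global__ void matrixMulTiled(float *C, const float *A, const float *B, int N) {
  __shared__ float As[TILE][TILE];
  __shared__ float Bs[TILE][TILE];
  int row = blockIdx.y * TILE + threadIdx.y;
  int col = blockIdx.x * TILE + threadIdx.x;
  float acc = 0.f;
  for (int t = 0; t < N / TILE; ++t) {
    // Each thread stages one element of the A tile and one of the B tile.
    As[threadIdx.y][threadIdx.x] = A[row * N + t * TILE + threadIdx.x];
    Bs[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * N + col];
    __syncthreads();
    for (int k = 0; k < TILE; ++k)
      acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();
  }
  C[row * N + col] = acc;
}

int main() {
  const int N = 256;  // must be a multiple of TILE for this sketch
  size_t bytes = (size_t)N * N * sizeof(float);
  float *hA = (float*)malloc(bytes), *hB = (float*)malloc(bytes), *hC = (float*)malloc(bytes);
  for (int i = 0; i < N * N; ++i) { hA[i] = 1.f; hB[i] = 2.f; }
  float *dA, *dB, *dC;
  cudaMalloc(&dA, bytes); cudaMalloc(&dB, bytes); cudaMalloc(&dC, bytes);
  cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);
  dim3 threads(TILE, TILE), grid(N / TILE, N / TILE);
  matrixMulTiled<<<grid, threads>>>(dC, dA, dB, N);
  cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);
  printf("C[0]=%f (expect %f)\n", hC[0], 2.f * N);  // all-ones times all-twos
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  free(hA); free(hB); free(hC);
  return 0;
}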
ccabfedb59abf30277c472ee9c064d00c9f1a326.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

#define NBLOCKS 1024
#define NTHREADS 1024

__global__ void add(int n, float *x, float *y)
{
    for (int i=blockIdx.x*blockDim.x + threadIdx.x; i<n; i+=blockDim.x*gridDim.x) {
        y[i] = x[i] + y[i];
    }
}

int main(int argc, char* argv[])
{
    int N = 1<<20;
    int size = (2*N*sizeof(float))>>20;
    std::cout << "Memory size " << size << " Mb" << std::endl;

    float *x, *y;
    // Allocate device memory for the two arrays
    hipMalloc(&x, N*sizeof(float));
    hipMalloc(&y, N*sizeof(float));

    float *a, *b;
    a = new float[N];
    b = new float[N];

    // initialise arrays on host
    for (int i=0; i<N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    hipMemcpy(x,a,N*sizeof(float),hipMemcpyHostToDevice);
    hipMemcpy(y,b,N*sizeof(float),hipMemcpyHostToDevice);

    // Run kernel on N elements on the GPU
    int nThreads = 1024;
    int nBlocks = (N + nThreads - 1)/nThreads;
    std::cout << "Blocks: " << nBlocks << std::endl;
    std::cout << "Threads: " << nThreads << std::endl;
    hipLaunchKernelGGL(( add), dim3(nBlocks),dim3(nThreads), 0, 0, N, x, y);

    // Wait for GPU to finish
    hipDeviceSynchronize();

    hipMemcpy(a,x,N*sizeof(float),hipMemcpyDeviceToHost);
    hipMemcpy(b,y,N*sizeof(float),hipMemcpyDeviceToHost);

    // Check for errors (all values should be 3.0f)
    float maxErr = 0.0f;
    for (int i=0; i<N; i++) {
        maxErr = fmax(maxErr, fabs(b[i]-3.0f));
    }
    std::cout << "Max err: " << maxErr << std::endl;

    // Free memory
    hipFree(x);
    hipFree(y);

    return 0;
}
ccabfedb59abf30277c472ee9c064d00c9f1a326.cu
#include <iostream>
#include <math.h>

#define NBLOCKS 1024
#define NTHREADS 1024

__global__ void add(int n, float *x, float *y)
{
    for (int i=blockIdx.x*blockDim.x + threadIdx.x; i<n; i+=blockDim.x*gridDim.x) {
        y[i] = x[i] + y[i];
    }
}

int main(int argc, char* argv[])
{
    int N = 1<<20;
    int size = (2*N*sizeof(float))>>20;
    std::cout << "Memory size " << size << " Mb" << std::endl;

    float *x, *y;
    // Allocate device memory for the two arrays
    cudaMalloc(&x, N*sizeof(float));
    cudaMalloc(&y, N*sizeof(float));

    float *a, *b;
    a = new float[N];
    b = new float[N];

    // initialise arrays on host
    for (int i=0; i<N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    cudaMemcpy(x,a,N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(y,b,N*sizeof(float),cudaMemcpyHostToDevice);

    // Run kernel on N elements on the GPU
    int nThreads = 1024;
    int nBlocks = (N + nThreads - 1)/nThreads;
    std::cout << "Blocks: " << nBlocks << std::endl;
    std::cout << "Threads: " << nThreads << std::endl;
    add<<<nBlocks,nThreads>>>(N, x, y);

    // Wait for GPU to finish
    cudaDeviceSynchronize();

    cudaMemcpy(a,x,N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(b,y,N*sizeof(float),cudaMemcpyDeviceToHost);

    // Check for errors (all values should be 3.0f)
    float maxErr = 0.0f;
    for (int i=0; i<N; i++) {
        maxErr = fmax(maxErr, fabs(b[i]-3.0f));
    }
    std::cout << "Max err: " << maxErr << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
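// The pair above stages data through explicit device allocations and
// host/device copies. For contrast, a minimal sketch of the same vector add
// using CUDA unified memory (cudaMallocManaged), which removes the explicit
// cudaMemcpy calls (illustrative only, not part of the dataset files):
#include <iostream>
#include <math.h>

__global__ void add_managed(int n, float *x, float *y) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
    y[i] = x[i] + y[i];
}

int main() {
  int N = 1 << 20;
  float *x, *y;
  // Managed allocations are visible to both host and device; no memcpy needed.
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }
  int nThreads = 256;
  int nBlocks = (N + nThreads - 1) / nThreads;
  add_managed<<<nBlocks, nThreads>>>(N, x, y);
  cudaDeviceSynchronize();  // wait before touching managed memory on the host
  float maxErr = 0.0f;
  for (int i = 0; i < N; i++) maxErr = fmax(maxErr, fabs(y[i] - 3.0f));
  std::cout << "Max err: " << maxErr << std::endl;
  cudaFree(x);
  cudaFree(y);
  return 0;
}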
c149d0ae1678bf3c10857b8b54ddfd250f7da1b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "wb.h" #define BLOCK_DIM 64 __global__ void vecAdd(float *in1, float *in2, float *out, int len) { int i = threadIdx.x + (blockDim.x * blockIdx.x); if (i < len) { out[i] = in1[i] + in2[i]; } } int main(int argc, char **argv) { wbArg_t args; int inputLength; size_t inputSize; float *hostInput1; float *hostInput2; float *hostOutput; float *deviceInput1; float *deviceInput2; float *deviceOutput; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput1 = wbImport(wbArg_getInputFile(args, 0), &inputLength); hostInput2 = wbImport(wbArg_getInputFile(args, 1), &inputLength); hostOutput = (float *)malloc(inputLength * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here inputSize = inputLength * sizeof(float); hipMalloc((void **)(&deviceInput1), inputSize); hipMalloc((void **)(&deviceInput2), inputSize); hipMalloc((void **)(&deviceOutput), inputSize); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(Copy, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy((void *)deviceInput1, (void *)hostInput1, inputSize, hipMemcpyHostToDevice); hipMemcpy((void *)deviceInput2, (void *)hostInput2, inputSize, hipMemcpyHostToDevice); wbTime_stop(Copy, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int gridDim = 1 + ((inputLength - 1) / BLOCK_DIM); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( vecAdd), dim3(gridDim), dim3(BLOCK_DIM), 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy((void *)hostOutput, (void *)deviceOutput, inputSize, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree((void *)deviceInput1); hipFree((void *)deviceInput2); hipFree((void *)deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, inputLength); free(hostInput1); free(hostInput2); free(hostOutput); return 0; }
c149d0ae1678bf3c10857b8b54ddfd250f7da1b0.cu
#include "wb.h" #define BLOCK_DIM 64 __global__ void vecAdd(float *in1, float *in2, float *out, int len) { int i = threadIdx.x + (blockDim.x * blockIdx.x); if (i < len) { out[i] = in1[i] + in2[i]; } } int main(int argc, char **argv) { wbArg_t args; int inputLength; size_t inputSize; float *hostInput1; float *hostInput2; float *hostOutput; float *deviceInput1; float *deviceInput2; float *deviceOutput; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput1 = wbImport(wbArg_getInputFile(args, 0), &inputLength); hostInput2 = wbImport(wbArg_getInputFile(args, 1), &inputLength); hostOutput = (float *)malloc(inputLength * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here inputSize = inputLength * sizeof(float); cudaMalloc((void **)(&deviceInput1), inputSize); cudaMalloc((void **)(&deviceInput2), inputSize); cudaMalloc((void **)(&deviceOutput), inputSize); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(Copy, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy((void *)deviceInput1, (void *)hostInput1, inputSize, cudaMemcpyHostToDevice); cudaMemcpy((void *)deviceInput2, (void *)hostInput2, inputSize, cudaMemcpyHostToDevice); wbTime_stop(Copy, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int gridDim = 1 + ((inputLength - 1) / BLOCK_DIM); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here vecAdd<<<gridDim, BLOCK_DIM>>>(deviceInput1, deviceInput2, deviceOutput, inputLength); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy((void *)hostOutput, (void *)deviceOutput, inputSize, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree((void *)deviceInput1); cudaFree((void *)deviceInput2); cudaFree((void *)deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, inputLength); free(hostInput1); free(hostInput2); free(hostOutput); return 0; }
ca3ed6e9fd30594837add487291be516e2c2cf1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> __device__ void partition_by_bit(int *values, int bit); __device__ int plus_scan(int *x); __device__ void radix_sort(int *values); #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; __device__ int plus_scan(int *x) { int i = threadIdx.x; // id of thread executing this instance int n = blockDim.x; // total number of threads in this block int offset; // distance between elements to be added for( offset = 1; offset < n; offset *= 2) { int t; if ( i >= offset ) t = x[i-offset]; __syncthreads(); if ( i >= offset ) x[i] = t + x[i]; __syncthreads(); } return x[i]; } __device__ void partition_by_bit(int *values, int bit) { int thread = threadIdx.x; int size = blockDim.x; int x_i = values[thread]; int p_i = (x_i >> bit) & 1; values[thread] = p_i; __syncthreads(); int T_before = plus_scan(values); int T_total = values[size-1]; int F_total = size - T_total; __syncthreads(); if ( p_i ) { values[T_before-1 + F_total] = x_i; } else { values[thread - T_before] = x_i; } } __device__ void radix_sort(int *values) { int bit; for( bit = 0; bit < 32; ++bit ) { partition_by_bit(values, bit); __syncthreads(); } } __global__ void sortBlk(int *in, int n, int *sortedBlocks, int bit, int nBins) { extern __shared__ int s[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { s[threadIdx.x] = (in[i] >> bit) & (nBins - 1); } __syncthreads(); radix_sort(s); __syncthreads(); if(i < n) { sortedBlocks[i] = s[threadIdx.x]; } __syncthreads(); } __global__ void computeHistKernel(int * in, int n, int * hist, int nBins, int gridSize) { extern __shared__ int s[]; for(int i = threadIdx.x; i < nBins; i += blockDim.x) s[i] = 0; __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { int bin = in[i]; atomicAdd(&s[bin], 1); } __syncthreads(); // Each block adds its local hist to global hist using atomic on GMEM for(int i = threadIdx.x; i < nBins; i += blockDim.x) atomicAdd(&hist[blockIdx.x + i * gridSize], s[i]); } __global__ void scanBlkKernel(int * in, int n, int * out) { //TODO extern __shared__ int s[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) s[threadIdx.x] = in[i]; else s[threadIdx.x] = 0; __syncthreads(); int temp; for(int stride = 1; stride < blockDim.x; stride *= 2) { if(threadIdx.x >= stride) temp = s[threadIdx.x - stride]; __syncthreads(); if(threadIdx.x >= stride) s[threadIdx.x] += temp; __syncthreads(); } if(i < n - 1) out[i + 1] = s[threadIdx.x]; out[0] = 0; } __global__ void scatterKernel(int * in, int n, int *sortedBlocks, int *histScan, int * out, int gridSize) { extern __shared__ int s[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { s[threadIdx.x] = sortedBlocks[i]; } __syncthreads(); int before = 0; for(int j = threadIdx.x - 1; j >= 0; j--) 
        if (s[threadIdx.x] == s[j]) before++;
    __syncthreads();

    if (i < n) // only threads mapped to a valid input element write a result
    {
        int index = blockIdx.x + sortedBlocks[i] * gridSize;
        int rank = histScan[index] + before;
        out[rank] = in[i];
    }
}

__global__ void computeHistKernel2(int * src, int n, int * hist, int nBins, int bit)
{
    // TODO
    // Each block computes its local hist using atomic on SMEM
    extern __shared__ int s[];
    for (int i = threadIdx.x; i < nBins; i += blockDim.x)
        s[i] = 0;
    __syncthreads();

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        int bin = (src[i] >> bit) & (nBins - 1);
        atomicAdd(&s[bin], 1);
    }
    __syncthreads();

    // Each block adds its local hist to global hist using atomic on GMEM
    for (int i = threadIdx.x; i < nBins; i += blockDim.x)
        atomicAdd(&hist[i], s[i]);
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortBit(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes, int bit)
{
    // TODO
    int nBins = 1 << nBits; // 2^nBits
    int * hist = (int *)malloc(nBins * sizeof(int));
    int * histScan = (int *)malloc(nBins * sizeof(int));

    // In each counting sort, we sort data in "src" and write result to "dst"
    // Then, we swap these 2 pointers and go to the next counting sort
    // At first, we assign "src = in" and "dst = out"
    // However, the data pointed by "in" is read-only
    // --> we create a copy of this data and assign "src" to the address of this copy
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;
    uint32_t * temp;

    dim3 blockSize1(blockSizes[0]);
    dim3 blockSize2(blockSizes[1]);

    // Allocate device memories
    int * d_hist, * d_histScan, * d_in;
    CHECK(hipMalloc(&d_in, n * sizeof(int)));
    CHECK(hipMalloc(&d_hist, nBins * sizeof(int)));
    CHECK(hipMalloc(&d_histScan, nBins * sizeof(int)));

    // Call kernel
    dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 gridSize2((n - 1) / blockSize2.x + 1);
    size_t smemSize = nBins * sizeof(int);
    size_t sharedMemorySizeByte = blockSize2.x * sizeof(int);
    int * d_blkSums;
    CHECK(hipMalloc(&d_blkSums, gridSize2.x * sizeof(int)));

    // TODO: Compute "hist" of the current digit
    CHECK(hipMemcpy(d_in, src, n * sizeof(int), hipMemcpyHostToDevice));
    CHECK(hipMemset(d_hist, 0, nBins * sizeof(int)));
    hipLaunchKernelGGL(( computeHistKernel2), dim3(gridSize1), dim3(blockSize1), smemSize, 0, d_in, n, d_hist, nBins, bit);

    // TODO: Scan "hist" (exclusively) and save the result to "histScan"
    hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize2), dim3(blockSize2), sharedMemorySizeByte, 0, d_hist, nBins, d_histScan);
    CHECK(hipMemcpy(hist, d_histScan, nBins * sizeof(int), hipMemcpyDeviceToHost));

    // TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
    for (int i = 0; i < n; i++)
    {
        int bin = (src[i] >> bit) & (nBins - 1);
        dst[hist[bin]] = src[i];
        hist[bin]++;
    }

    // TODO: Swap "src" and "dst"
    temp = src;
    src = dst;
    dst = temp;

    // TODO: Copy result to "out"
    memcpy(out, src, n * sizeof(uint32_t));

    // Free memories
    free(hist);
    free(histScan);
    free(originalSrc);

    // Free device memories
    CHECK(hipFree(d_in));
    CHECK(hipFree(d_hist));
    CHECK(hipFree(d_histScan));
    CHECK(hipFree(d_blkSums));
}

void sortParallel(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    int nBins = 1 << nBits; // 2^nBits
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * k = (uint32_t *)malloc(n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;
    uint32_t * temp;

    dim3 blockSize1(blockSizes[0]);
    dim3 blockSize2(blockSizes[1]);

    // Allocate device memories
    int * d_hist, * d_histScan, * d_in, * d_sortedBlocks, * d_out, * d_k;
    CHECK(hipMalloc(&d_in, n * sizeof(int)));
    CHECK(hipMalloc(&d_out, n * sizeof(int)));
    CHECK(hipMalloc(&d_sortedBlocks, n * sizeof(int)));
    CHECK(hipMalloc(&d_k, n * sizeof(int)));

    // Call kernel
    dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 gridSize2((n - 1) / blockSize2.x + 1);
    CHECK(hipMalloc(&d_hist, nBins * gridSize1.x * sizeof(int)));
    CHECK(hipMalloc(&d_histScan, nBins * gridSize1.x * sizeof(int)));
    int * hist = (int *)malloc(nBins * gridSize1.x * sizeof(int));
    int * histScan = (int *)malloc(nBins * gridSize1.x * sizeof(int));
    size_t smemSize = blockSize1.x * sizeof(int);
    size_t smemSizeHist = nBins * sizeof(int);
    uint32_t * block = (uint32_t *)malloc(blockSize1.x * sizeof(int));
    uint32_t * block2 = (uint32_t *)malloc(blockSize1.x * sizeof(int));
    int m = 0;
    int mul;
    GpuTimer timer;
    int i = 0;

    for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
    {
        printf("%d: \n", i);

        // Sort each block of data locally on the current digit
        timer.Start();
        CHECK(hipMemcpy(d_in, src, n * sizeof(int), hipMemcpyHostToDevice));
        hipLaunchKernelGGL(( sortBlk), dim3(gridSize1), dim3(blockSize1), smemSize, 0, d_in, n, d_sortedBlocks, bit, nBins);
        for (int j = 0; j < n; j++)
        {
            block[m] = src[j];
            m++;
            if ((j + 1) % blockSize1.x == 0)
            {
                m = 0;
                sortBit(block, blockSize1.x, block2, nBits, blockSizes, bit);
                mul = (j + 1) / blockSize1.x;
                for (int l = j + 1 - blockSize1.x; l < mul * blockSize1.x; l++)
                {
                    k[l] = block2[m];
                    m++;
                }
                m = 0;
            }
        }
        CHECK(hipMemcpy(d_k, k, n * sizeof(int), hipMemcpyHostToDevice));
        timer.Stop();
        printf("Sort block: %.3f ms\n", timer.Elapsed());

        // TODO: Compute "hist" of the current digit
        timer.Start();
        CHECK(hipMemset(d_hist, 0, nBins * gridSize1.x * sizeof(int)));
        hipLaunchKernelGGL(( computeHistKernel), dim3(gridSize1), dim3(blockSize1), smemSizeHist, 0, d_sortedBlocks, n, d_hist, nBins, gridSize1.x);
        CHECK(hipMemcpy(hist, d_hist, nBins * gridSize1.x * sizeof(int), hipMemcpyDeviceToHost));
        timer.Stop();
        printf("Hist: %.3f ms\n", timer.Elapsed());

        // TODO: Scan "hist" (exclusively) and save the result to "histScan"
        timer.Start();
        histScan[0] = 0;
        for (int bin = 1; bin < nBins * gridSize1.x; bin++)
            histScan[bin] = histScan[bin - 1] + hist[bin - 1];
        CHECK(hipMemcpy(d_histScan, histScan, nBins * gridSize1.x * sizeof(int), hipMemcpyHostToDevice));
        timer.Stop();
        printf("Scan: %.3f ms\n", timer.Elapsed());

        // TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
        timer.Start();
        hipLaunchKernelGGL(( scatterKernel), dim3(gridSize1), dim3(blockSize1), smemSize, 0, d_k, n, d_sortedBlocks, d_histScan, d_out, gridSize1.x);
        CHECK(hipMemcpy(dst, d_out, n * sizeof(int), hipMemcpyDeviceToHost));
        timer.Stop();
        printf("Scatter: %.3f ms\n", timer.Elapsed());

        // TODO: Swap "src" and "dst"
        temp = src;
        src = dst;
        dst = temp;
        i++;
    }

    // TODO: Copy result to "out"
    memcpy(out, src, n * sizeof(uint32_t));

    // Free memories
    free(hist);
    free(histScan);
    free(k);
    free(originalSrc);
    free(block);
    free(block2);

    // Free device memories
    CHECK(hipFree(d_in));
    CHECK(hipFree(d_out));
    CHECK(hipFree(d_hist));
    CHECK(hipFree(d_histScan));
    CHECK(hipFree(d_sortedBlocks));
    CHECK(hipFree(d_k));
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    thrust::device_vector<uint32_t> dv_out(in, in + n);
    thrust::sort(dv_out.begin(), dv_out.end());
    thrust::copy(dv_out.begin(), dv_out.end(), out);
}

// Radix sort
void sort(const uint32_t * in, int n, uint32_t * out, int nBits, bool useDevice=false, int * blockSizes=NULL)
{
    GpuTimer timer;
    timer.Start();

    if (useDevice == false)
    {
        printf("\nRadix sort Satish parallel\n");
        sortParallel(in, n, out, nBits, blockSizes);
    }
    else // use device
    {
        printf("\nRadix sort by device\n");
        sortByDevice(in, n, out, nBits, blockSizes);
    }

    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
}

void printDeviceInfo()
{
    hipDeviceProp_t devProv;
    CHECK(hipGetDeviceProperties(&devProv, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", devProv.name);
    printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
    printf("Num SMs: %d\n", devProv.multiProcessorCount);
    printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
    printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
    printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
    printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
    printf("****************************\n");
}

void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
    for (int i = 0; i < n; i++)
    {
        if (out[i] != correctOut[i])
        {
            printf("INCORRECT :(\n");
            printf("%d\n", i);
            printf("%d\n", out[i]);
            printf("%d\n", correctOut[i]);
            return;
        }
    }
    printf("CORRECT :)\n");
}

void printArray(uint32_t * a, int n)
{
    for (int i = 0; i < n; i++)
        printf("%i ", a[i]);
    printf("\n");
}

int main(int argc, char ** argv)
{
    // PRINT OUT DEVICE INFO
    printDeviceInfo();

    // SET UP INPUT SIZE
    int n = (1 << 20);
    //n = 16384;
    //n = 10;
    printf("\nInput size: %d\n", n);

    // ALLOCATE MEMORIES
    size_t bytes = n * sizeof(uint32_t);
    uint32_t * in = (uint32_t *)malloc(bytes);
    uint32_t * out = (uint32_t *)malloc(bytes);        // Device result
    uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result

    // SET UP INPUT DATA
    for (int i = 0; i < n; i++)
        in[i] = rand();
    //printArray(in, n);

    // SET UP NBITS
    int nBits = 4; // Default
    if (argc > 1)
        nBits = atoi(argv[1]);
    printf("\nNum bits per digit: %d\n", nBits);

    // DETERMINE BLOCK SIZES
    int blockSizes[2] = {512, 512}; // One for histogram, one for scan
    if (argc == 4)
    {
        blockSizes[0] = atoi(argv[2]);
        blockSizes[1] = atoi(argv[3]);
    }
    printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);

    sort(in, n, out, nBits, false, blockSizes);
    //printArray(correctOut, n);

    // SORT BY DEVICE
    sort(in, n, correctOut, nBits, true, blockSizes);
    checkCorrectness(out, correctOut, n);

    // FREE MEMORIES
    free(in);
    free(out);
    free(correctOut);

    return EXIT_SUCCESS;
}
ca3ed6e9fd30594837add487291be516e2c2cf1a.cu
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>

__device__ void partition_by_bit(int *values, int bit);
__device__ int plus_scan(int *x);
__device__ void radix_sort(int *values);

#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if (error != cudaSuccess) \
    { \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
        fprintf(stderr, "code: %d, reason: %s\n", error, \
                cudaGetErrorString(error)); \
        exit(1); \
    } \
}

struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    void Start()
    {
        cudaEventRecord(start, 0);
        cudaEventSynchronize(start);
    }

    void Stop()
    {
        cudaEventRecord(stop, 0);
    }

    float Elapsed()
    {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};

__device__ int plus_scan(int *x)
{
    int i = threadIdx.x; // id of thread executing this instance
    int n = blockDim.x;  // total number of threads in this block
    int offset;          // distance between elements to be added

    for (offset = 1; offset < n; offset *= 2)
    {
        int t;
        if (i >= offset)
            t = x[i - offset];
        __syncthreads();
        if (i >= offset)
            x[i] = t + x[i];
        __syncthreads();
    }
    return x[i];
}

__device__ void partition_by_bit(int *values, int bit)
{
    int thread = threadIdx.x;
    int size = blockDim.x;
    int x_i = values[thread];
    int p_i = (x_i >> bit) & 1;
    values[thread] = p_i;
    __syncthreads();

    int T_before = plus_scan(values);
    int T_total = values[size - 1];
    int F_total = size - T_total;
    __syncthreads();

    if (p_i)
    {
        values[T_before - 1 + F_total] = x_i;
    }
    else
    {
        values[thread - T_before] = x_i;
    }
}

__device__ void radix_sort(int *values)
{
    int bit;
    for (bit = 0; bit < 32; ++bit)
    {
        partition_by_bit(values, bit);
        __syncthreads();
    }
}

__global__ void sortBlk(int *in, int n, int *sortedBlocks, int bit, int nBins)
{
    extern __shared__ int s[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        s[threadIdx.x] = (in[i] >> bit) & (nBins - 1);
    }
    __syncthreads();
    radix_sort(s);
    __syncthreads();
    if (i < n)
    {
        sortedBlocks[i] = s[threadIdx.x];
    }
    __syncthreads();
}

__global__ void computeHistKernel(int * in, int n, int * hist, int nBins, int gridSize)
{
    extern __shared__ int s[];
    for (int i = threadIdx.x; i < nBins; i += blockDim.x)
        s[i] = 0;
    __syncthreads();

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        int bin = in[i];
        atomicAdd(&s[bin], 1);
    }
    __syncthreads();

    // Each block adds its local hist to global hist using atomic on GMEM
    for (int i = threadIdx.x; i < nBins; i += blockDim.x)
        atomicAdd(&hist[blockIdx.x + i * gridSize], s[i]);
}

__global__ void scanBlkKernel(int * in, int n, int * out)
{
    // TODO
    extern __shared__ int s[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        s[threadIdx.x] = in[i];
    else
        s[threadIdx.x] = 0;
    __syncthreads();

    int temp;
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if (threadIdx.x >= stride)
            temp = s[threadIdx.x - stride];
        __syncthreads();
        if (threadIdx.x >= stride)
            s[threadIdx.x] += temp;
        __syncthreads();
    }
    if (i < n - 1)
        out[i + 1] = s[threadIdx.x];
    out[0] = 0;
}

__global__ void scatterKernel(int * in, int n, int *sortedBlocks, int *histScan, int * out, int gridSize)
{
    extern __shared__ int s[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        s[threadIdx.x] = sortedBlocks[i];
    }
    __syncthreads();

    int before = 0;
    for (int j = threadIdx.x - 1; j >= 0; j--)
        if (s[threadIdx.x] == s[j]) before++;
    __syncthreads();

    if (i < n) // only threads mapped to a valid input element write a result
    {
        int index = blockIdx.x + sortedBlocks[i] * gridSize;
        int rank = histScan[index] + before;
        out[rank] = in[i];
    }
}

__global__ void computeHistKernel2(int * src, int n, int * hist, int nBins, int bit)
{
    // TODO
    // Each block computes its local hist using atomic on SMEM
    extern __shared__ int s[];
    for (int i = threadIdx.x; i < nBins; i += blockDim.x)
        s[i] = 0;
    __syncthreads();

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        int bin = (src[i] >> bit) & (nBins - 1);
        atomicAdd(&s[bin], 1);
    }
    __syncthreads();

    // Each block adds its local hist to global hist using atomic on GMEM
    for (int i = threadIdx.x; i < nBins; i += blockDim.x)
        atomicAdd(&hist[i], s[i]);
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortBit(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes, int bit)
{
    // TODO
    int nBins = 1 << nBits; // 2^nBits
    int * hist = (int *)malloc(nBins * sizeof(int));
    int * histScan = (int *)malloc(nBins * sizeof(int));

    // In each counting sort, we sort data in "src" and write result to "dst"
    // Then, we swap these 2 pointers and go to the next counting sort
    // At first, we assign "src = in" and "dst = out"
    // However, the data pointed by "in" is read-only
    // --> we create a copy of this data and assign "src" to the address of this copy
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;
    uint32_t * temp;

    dim3 blockSize1(blockSizes[0]);
    dim3 blockSize2(blockSizes[1]);

    // Allocate device memories
    int * d_hist, * d_histScan, * d_in;
    CHECK(cudaMalloc(&d_in, n * sizeof(int)));
    CHECK(cudaMalloc(&d_hist, nBins * sizeof(int)));
    CHECK(cudaMalloc(&d_histScan, nBins * sizeof(int)));

    // Call kernel
    dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 gridSize2((n - 1) / blockSize2.x + 1);
    size_t smemSize = nBins * sizeof(int);
    size_t sharedMemorySizeByte = blockSize2.x * sizeof(int);
    int * d_blkSums;
    CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int)));

    // TODO: Compute "hist" of the current digit
    CHECK(cudaMemcpy(d_in, src, n * sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMemset(d_hist, 0, nBins * sizeof(int)));
    computeHistKernel2<<<gridSize1, blockSize1, smemSize>>>(d_in, n, d_hist, nBins, bit);

    // TODO: Scan "hist" (exclusively) and save the result to "histScan"
    scanBlkKernel<<<gridSize2, blockSize2, sharedMemorySizeByte>>>(d_hist, nBins, d_histScan);
    CHECK(cudaMemcpy(hist, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));

    // TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
    for (int i = 0; i < n; i++)
    {
        int bin = (src[i] >> bit) & (nBins - 1);
        dst[hist[bin]] = src[i];
        hist[bin]++;
    }

    // TODO: Swap "src" and "dst"
    temp = src;
    src = dst;
    dst = temp;

    // TODO: Copy result to "out"
    memcpy(out, src, n * sizeof(uint32_t));

    // Free memories
    free(hist);
    free(histScan);
    free(originalSrc);

    // Free device memories
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_hist));
    CHECK(cudaFree(d_histScan));
    CHECK(cudaFree(d_blkSums));
}

void sortParallel(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    int nBins = 1 << nBits; // 2^nBits
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * k = (uint32_t *)malloc(n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;
    uint32_t * temp;

    dim3 blockSize1(blockSizes[0]);
    dim3 blockSize2(blockSizes[1]);

    // Allocate device memories
    int * d_hist, * d_histScan, * d_in, * d_sortedBlocks, * d_out, * d_k;
    CHECK(cudaMalloc(&d_in, n * sizeof(int)));
    CHECK(cudaMalloc(&d_out, n * sizeof(int)));
    CHECK(cudaMalloc(&d_sortedBlocks, n * sizeof(int)));
    CHECK(cudaMalloc(&d_k, n * sizeof(int)));

    // Call kernel
    dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 gridSize2((n - 1) / blockSize2.x + 1);
    CHECK(cudaMalloc(&d_hist, nBins * gridSize1.x * sizeof(int)));
    CHECK(cudaMalloc(&d_histScan, nBins * gridSize1.x * sizeof(int)));
    int * hist = (int *)malloc(nBins * gridSize1.x * sizeof(int));
    int * histScan = (int *)malloc(nBins * gridSize1.x * sizeof(int));
    size_t smemSize = blockSize1.x * sizeof(int);
    size_t smemSizeHist = nBins * sizeof(int);
    uint32_t * block = (uint32_t *)malloc(blockSize1.x * sizeof(int));
    uint32_t * block2 = (uint32_t *)malloc(blockSize1.x * sizeof(int));
    int m = 0;
    int mul;
    GpuTimer timer;
    int i = 0;

    for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
    {
        printf("%d: \n", i);

        // Sort each block of data locally on the current digit
        timer.Start();
        CHECK(cudaMemcpy(d_in, src, n * sizeof(int), cudaMemcpyHostToDevice));
        sortBlk<<<gridSize1, blockSize1, smemSize>>>(d_in, n, d_sortedBlocks, bit, nBins);
        for (int j = 0; j < n; j++)
        {
            block[m] = src[j];
            m++;
            if ((j + 1) % blockSize1.x == 0)
            {
                m = 0;
                sortBit(block, blockSize1.x, block2, nBits, blockSizes, bit);
                mul = (j + 1) / blockSize1.x;
                for (int l = j + 1 - blockSize1.x; l < mul * blockSize1.x; l++)
                {
                    k[l] = block2[m];
                    m++;
                }
                m = 0;
            }
        }
        CHECK(cudaMemcpy(d_k, k, n * sizeof(int), cudaMemcpyHostToDevice));
        timer.Stop();
        printf("Sort block: %.3f ms\n", timer.Elapsed());

        // TODO: Compute "hist" of the current digit
        timer.Start();
        CHECK(cudaMemset(d_hist, 0, nBins * gridSize1.x * sizeof(int)));
        computeHistKernel<<<gridSize1, blockSize1, smemSizeHist>>>(d_sortedBlocks, n, d_hist, nBins, gridSize1.x);
        CHECK(cudaMemcpy(hist, d_hist, nBins * gridSize1.x * sizeof(int), cudaMemcpyDeviceToHost));
        timer.Stop();
        printf("Hist: %.3f ms\n", timer.Elapsed());

        // TODO: Scan "hist" (exclusively) and save the result to "histScan"
        timer.Start();
        histScan[0] = 0;
        for (int bin = 1; bin < nBins * gridSize1.x; bin++)
            histScan[bin] = histScan[bin - 1] + hist[bin - 1];
        CHECK(cudaMemcpy(d_histScan, histScan, nBins * gridSize1.x * sizeof(int), cudaMemcpyHostToDevice));
        timer.Stop();
        printf("Scan: %.3f ms\n", timer.Elapsed());

        // TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
        timer.Start();
        scatterKernel<<<gridSize1, blockSize1, smemSize>>>(d_k, n, d_sortedBlocks, d_histScan, d_out, gridSize1.x);
        CHECK(cudaMemcpy(dst, d_out, n * sizeof(int), cudaMemcpyDeviceToHost));
        timer.Stop();
        printf("Scatter: %.3f ms\n", timer.Elapsed());

        // TODO: Swap "src" and "dst"
        temp = src;
        src = dst;
        dst = temp;
        i++;
    }

    // TODO: Copy result to "out"
    memcpy(out, src, n * sizeof(uint32_t));

    // Free memories
    free(hist);
    free(histScan);
    free(k);
    free(originalSrc);
    free(block);
    free(block2);

    // Free device memories
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    CHECK(cudaFree(d_hist));
    CHECK(cudaFree(d_histScan));
    CHECK(cudaFree(d_sortedBlocks));
    CHECK(cudaFree(d_k));
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    thrust::device_vector<uint32_t> dv_out(in, in + n);
    thrust::sort(dv_out.begin(), dv_out.end());
    thrust::copy(dv_out.begin(), dv_out.end(), out);
}

// Radix sort
void sort(const uint32_t * in, int n, uint32_t * out, int nBits, bool useDevice=false, int * blockSizes=NULL)
{
    GpuTimer timer;
    timer.Start();

    if (useDevice == false)
    {
        printf("\nRadix sort Satish parallel\n");
        sortParallel(in, n, out, nBits, blockSizes);
    }
    else // use device
    {
        printf("\nRadix sort by device\n");
        sortByDevice(in, n, out, nBits, blockSizes);
    }

    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
}

void printDeviceInfo()
{
    cudaDeviceProp devProv;
    CHECK(cudaGetDeviceProperties(&devProv, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", devProv.name);
    printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
    printf("Num SMs: %d\n", devProv.multiProcessorCount);
    printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
    printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
    printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
    printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
    printf("****************************\n");
}

void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
    for (int i = 0; i < n; i++)
    {
        if (out[i] != correctOut[i])
        {
            printf("INCORRECT :(\n");
            printf("%d\n", i);
            printf("%d\n", out[i]);
            printf("%d\n", correctOut[i]);
            return;
        }
    }
    printf("CORRECT :)\n");
}

void printArray(uint32_t * a, int n)
{
    for (int i = 0; i < n; i++)
        printf("%i ", a[i]);
    printf("\n");
}

int main(int argc, char ** argv)
{
    // PRINT OUT DEVICE INFO
    printDeviceInfo();

    // SET UP INPUT SIZE
    int n = (1 << 20);
    //n = 16384;
    //n = 10;
    printf("\nInput size: %d\n", n);

    // ALLOCATE MEMORIES
    size_t bytes = n * sizeof(uint32_t);
    uint32_t * in = (uint32_t *)malloc(bytes);
    uint32_t * out = (uint32_t *)malloc(bytes);        // Device result
    uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result

    // SET UP INPUT DATA
    for (int i = 0; i < n; i++)
        in[i] = rand();
    //printArray(in, n);

    // SET UP NBITS
    int nBits = 4; // Default
    if (argc > 1)
        nBits = atoi(argv[1]);
    printf("\nNum bits per digit: %d\n", nBits);

    // DETERMINE BLOCK SIZES
    int blockSizes[2] = {512, 512}; // One for histogram, one for scan
    if (argc == 4)
    {
        blockSizes[0] = atoi(argv[2]);
        blockSizes[1] = atoi(argv[3]);
    }
    printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);

    sort(in, n, out, nBits, false, blockSizes);
    //printArray(correctOut, n);

    // SORT BY DEVICE
    sort(in, n, correctOut, nBits, true, blockSizes);
    checkCorrectness(out, correctOut, n);

    // FREE MEMORIES
    free(in);
    free(out);
    free(correctOut);

    return EXIT_SUCCESS;
}
9247ffca8100fc84a749391a359c9b92458ee44a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "eq_strided_double.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            int xOffset = 1;
            int yOffset = 1;
            double *dx = NULL;
            hipMalloc(&dx, XSIZE * YSIZE * sizeof(double));
            double *dy = NULL;
            hipMalloc(&dy, XSIZE * YSIZE * sizeof(double));
            int incx = 1;
            int incy = 1;
            double *result = NULL;
            hipMalloc(&result, XSIZE * YSIZE * sizeof(double));
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            // Warm-up launch
            hipLaunchKernelGGL(( eq_strided_double), dim3(gridBlock), dim3(threadBlock), 0, 0, n, xOffset, yOffset, dx, dy, incx, incy, result);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( eq_strided_double), dim3(gridBlock), dim3(threadBlock), 0, 0, n, xOffset, yOffset, dx, dy, incx, incy, result);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( eq_strided_double), dim3(gridBlock), dim3(threadBlock), 0, 0, n, xOffset, yOffset, dx, dy, incx, incy, result);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(dx);
            hipFree(dy);
            hipFree(result);
        }
    }
}
9247ffca8100fc84a749391a359c9b92458ee44a.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "eq_strided_double.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            int xOffset = 1;
            int yOffset = 1;
            double *dx = NULL;
            cudaMalloc(&dx, XSIZE * YSIZE * sizeof(double));
            double *dy = NULL;
            cudaMalloc(&dy, XSIZE * YSIZE * sizeof(double));
            int incx = 1;
            int incy = 1;
            double *result = NULL;
            cudaMalloc(&result, XSIZE * YSIZE * sizeof(double));
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            // Warm-up launch
            eq_strided_double<<<gridBlock, threadBlock>>>(n, xOffset, yOffset, dx, dy, incx, incy, result);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                eq_strided_double<<<gridBlock, threadBlock>>>(n, xOffset, yOffset, dx, dy, incx, incy, result);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                eq_strided_double<<<gridBlock, threadBlock>>>(n, xOffset, yOffset, dx, dy, incx, incy, result);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(dx);
            cudaFree(dy);
            cudaFree(result);
        }
    }
}